From ee24cb8a435776677e326c54a01892aa54529134 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Mon, 23 Sep 2024 11:33:33 -0500 Subject: [PATCH 01/76] fix: Fix PDF summarization prompt in Gemini 1.5 Pro Notebook (#1151) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Edited PDF Summarization prompt to resolve issue where PDF was not read. Seems to be just a weird model quirk. Doesn't affect 1.5 Flash. Fixes #754 🦕 --- .../intro_gemini_1_5_pro.ipynb | 61 +++++++++---------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/gemini/getting-started/intro_gemini_1_5_pro.ipynb b/gemini/getting-started/intro_gemini_1_5_pro.ipynb index 1b3735ff9b..59cda2f42e 100644 --- a/gemini/getting-started/intro_gemini_1_5_pro.ipynb +++ b/gemini/getting-started/intro_gemini_1_5_pro.ipynb @@ -29,7 +29,7 @@ "id": "7yVV6txOmNMn" }, "source": [ - "# Getting started with the Vertex AI Gemini 1.5 Pro\n", + "# Getting started with Vertex AI Gemini 1.5 Pro\n", "\n", "\n", "\n", @@ -105,7 +105,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": { "id": "tFy3H3aPgx12" }, @@ -195,7 +195,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 1, "metadata": { "id": "Nqwi-5ufWp_B" }, @@ -220,7 +220,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 2, "metadata": { "id": "lslYAvw37JGQ" }, @@ -253,7 +253,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 3, "metadata": { "id": "U7ExWmuLBdIA" }, @@ -277,7 +277,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 4, "metadata": { "id": "FhFxrtfdSwOP" }, @@ -286,8 +286,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "total_tokens: 14\n", - "total_billable_characters: 29\n", + "total_tokens: 32\n", + "total_billable_characters: 108\n", "\n", "\n", "Answer:\n", @@ -295,7 +295,7 @@ "\n", 
"\n", "Usage metadata:\n", - "{'prompt_token_count': 14, 'candidates_token_count': 8, 'total_token_count': 22}\n", + "{'prompt_token_count': 32, 'candidates_token_count': 8, 'total_token_count': 40}\n", "\n", "Finish reason:\n", "1\n", @@ -303,24 +303,24 @@ "Safety settings:\n", "[category: HARM_CATEGORY_HATE_SPEECH\n", "probability: NEGLIGIBLE\n", - "probability_score: 0.15077754855155945\n", + "probability_score: 0.155273438\n", "severity: HARM_SEVERITY_NEGLIGIBLE\n", - "severity_score: 0.07821886986494064\n", + "severity_score: 0.0737304688\n", ", category: HARM_CATEGORY_DANGEROUS_CONTENT\n", "probability: NEGLIGIBLE\n", - "probability_score: 0.06730107963085175\n", + "probability_score: 0.0727539062\n", "severity: HARM_SEVERITY_NEGLIGIBLE\n", - "severity_score: 0.09089674800634384\n", + "severity_score: 0.0913085938\n", ", category: HARM_CATEGORY_HARASSMENT\n", "probability: NEGLIGIBLE\n", - "probability_score: 0.1252792477607727\n", + "probability_score: 0.134765625\n", "severity: HARM_SEVERITY_NEGLIGIBLE\n", - "severity_score: 0.08525123447179794\n", + "severity_score: 0.0815429688\n", ", category: HARM_CATEGORY_SEXUALLY_EXPLICIT\n", "probability: NEGLIGIBLE\n", - "probability_score: 0.21060390770435333\n", + "probability_score: 0.232421875\n", "severity: HARM_SEVERITY_NEGLIGIBLE\n", - "severity_score: 0.11260009557008743\n", + "severity_score: 0.125\n", "]\n" ] } @@ -606,7 +606,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 8, "metadata": { "id": "JgKDIZUstYwV" }, @@ -615,19 +615,18 @@ "name": "stdout", "output_type": "stream", "text": [ - "## Summary of \"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context\"\n", - "\n", - "**Gemini 1.5 Pro** is a new large language model (LLM) from Google DeepMind capable of processing and understanding extremely long sequences of information across various modalities like text, code, images, audio, and video. 
It utilizes a mixture-of-experts architecture and achieves state-of-the-art performance on many tasks while being significantly more efficient than previous models. \n", + "This is a technical report introducing Gemini 1.5 Pro, Google's latest multi-modal model. The model is built upon the mixture-of-experts (MoE) architecture and exhibits impressive performance on reasoning, multi-modality, and long context understanding. Gemini 1.5 Pro distinguishes itself by expanding the context window size to several million tokens, a significant leap beyond the 200k tokens offered by its predecessor, Claude 2.1. This expanded capacity allows for processing nearly five days of audio, entire books, or extensive code repositories. \n", "\n", - "**Key advancements and findings:**\n", + "The report highlights the model's abilities through: \n", + "* **Qualitative examples:** Showcasing impressive feats such as pinpointing specific code within the complete JAX codebase, learning to translate a new language from a single grammar book and dictionary, and identifying a scene from Les Misérables based on a hand-drawn sketch. \n", + "* **Quantitative evaluations:** \n", + " * **Diagnostic:** demonstrating near-perfect recall in \"needle-in-a-haystack\" tasks across text, video, and audio, even maintaining high recall with context lengths extending to 10 million tokens. \n", + " * **Realistic:** excelling in long-document QA using Les Misérables as context, outperforming competitors on long-video QA tasks, and showing significant progress in long-context automatic speech recognition. \n", + " * **Core Capabilities:** Surpassing the performance of its predecessor (Gemini 1.0) and rivaling or exceeding the performance of a state-of-the-art model, Gemini 1.0 Ultra, on core benchmarks related to coding, math, science, reasoning, and instruction following. 
\n", "\n", - "* **Unprecedented context length:** Gemini 1.5 Pro can handle up to 10 million tokens of context, enabling it to process information like entire books, days-long audio recordings, and hours of video. This opens up new possibilities for applications like analyzing large datasets, summarizing documents, and understanding complex video content.\n", - "* **Improved performance across modalities:** The model surpasses its predecessors and even matches or exceeds the performance of state-of-the-art models like Gemini 1.0 Ultra on various benchmarks across text (e.g., reasoning, math, coding), vision, and audio understanding.\n", - "* **In-context learning:** Gemini 1.5 Pro showcases the ability to learn new skills like translating languages (e.g., English to Kalamang) with very limited data by providing the necessary reference materials directly in the context. This has implications for supporting low-resource languages and facilitating cross-lingual communication.\n", - "* **Diagnostic and realistic evaluations:** The researchers developed new benchmarks and evaluation methodologies to assess the long-context capabilities of the model, including \"needle-in-a-haystack\" tasks for different modalities and question answering from long documents and videos.\n", - "* **Responsible AI practices:** Google DeepMind emphasizes its commitment to responsible deployment by conducting impact assessments, implementing model safety mitigations, and evaluating potential risks and biases. \n", + "The report also delves into the responsible development and deployment of the model, emphasizing their approach to impact assessment, model mitigations, and ongoing safety evaluations. 
\n", "\n", - "**Overall, Gemini 1.5 Pro represents a significant leap forward in LLM research, demonstrating the potential of long-context understanding and multimodal capabilities for various applications while emphasizing the importance of responsible development and deployment.** \n", + "In conclusion, Gemini 1.5 Pro represents a significant advancement in AI, showcasing unprecedented capabilities in long-context understanding across multiple modalities. The report emphasizes the need for novel evaluation methods to better assess the potential of such models and suggests promising avenues for future research. \n", "\n" ] } @@ -636,12 +635,12 @@ "pdf_file_uri = \"gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf\"\n", "\n", "prompt = \"\"\"\n", - " Your are a very professional document summarization specialist.\n", - " Please summarize the given document.\n", + " You are a very professional document summarization specialist.\n", + " Summarize the given document.\n", "\"\"\"\n", "\n", "pdf_file = Part.from_uri(pdf_file_uri, mime_type=\"application/pdf\")\n", - "contents = [pdf_file, prompt]\n", + "contents = [prompt, pdf_file]\n", "\n", "response = model.generate_content(contents)\n", "print(response.text)" From 14d843303d35242f7440e29181273346b1acf152 Mon Sep 17 00:00:00 2001 From: nhootan <103317089+nhootan@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:43:44 -0400 Subject: [PATCH 02/76] refactor: Add doc links to VAPO notebook (#1156) Co-authored-by: hootan Co-authored-by: Owl Bot --- .../vertex_ai_prompt_optimizer_ui.ipynb | 112 ++++++++++++------ 1 file changed, 76 insertions(+), 36 deletions(-) diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb index 940cd61a1c..93d820f28e 100644 --- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb @@ -29,6 +29,7 @@ "id": 
"RN8N3O43QDT5" }, "source": [ + "# Vertex Prompt Optimizer Notebook UI (Preview)\n", "
\n", "
\n", " \n", @@ -60,13 +61,51 @@ }, "source": [ "# Overview\n", - "Welcome to Vertex AI Prompt Optimizer (VAPO)! This Notebook showcases VAPO, a tool that iteratively optimizes prompts to suit a target model (e.g., `gemini-1.5-pro`) using target-specific metric(s).\n", + "This Notebook showcases the Vertex AI prompt optimizer, a tool that iteratively optimizes prompts to suit a target model (e.g., `gemini-1.5-pro`) using target-specific metric(s).\n", "\n", "Key Use Cases:\n", "\n", "* Prompt Optimization: Enhance the quality of an initial prompt by refining its structure and content to match the target model's optimal input characteristics.\n", "\n", - "* Prompt Translation: Adapt prompts optimized for one model to work effectively with a different target model." + "* Prompt Translation: Adapt prompts optimized for one model to work effectively with a different target model.\n", + "\n", + "For the detailed documentation please see [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Getting Started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "Authenticate your environment on Google Colab.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" ] }, { @@ -96,12 +135,15 @@ "id": "-p59jd5rOp4q" }, "source": [ - "# Step 1: Configure your prompt template\n", - "Prompts consist of two key parts:\n", - "* System Instruction (SI) Template: A fixed instruction shared across all queries for a given task.\n", - "* Task/Context Template: A dynamic part that changes based on the task.\n", + "# Step 1: Create a prompt template and system instructions\n", + "Provide your system instruction and prompt template below. Refer to [here]( https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#template-si) for instructions.\n", + "\n", + "Prompts consist of two key components:\n", + "\n", + "- System Instruction: System instruction is the instruction that gets passed to the model before any user input in the prompt. This is the fixed part of the prompt template shared across all queries for a given task.\n", + "- Prompt template: A task is the text in the prompt that you want the model to provide a response for. Context is information that you include in the prompt that the model uses or references when generating a response. 
These are the dynamic parts of the prompt template that change based on the task.\n", "\n", - "APD enables the translation and optimization of the System Instruction Template, while the Task/Context Template remains essential for evaluating different SI templates." + "Prompt Optimizer enables the optimization or translation of the System Instruction template, while the prompt template remains essential for evaluating and selecting the best System Instruction template." ] }, { @@ -122,13 +164,16 @@ "id": "5y-cmg0TQP6v" }, "source": [ - "# Step 2: Input your data\n", - "To optimize the model, provide a CSV or JSONL file containing labeled validation samples\n", - "* Focus on examples that specifically demonstrate the issues you want to address.\n", - "* Recommendation: Use 50-100 distinct samples for reliable results. However, the tool can still be effective with as few as 5 samples.\n", + "# Step 2: Configure project settings\n", + "To optimize the prompt for your target Google model, provide a CSV or JSONL file containing labeled validation samples (input, ground truth output pairs). Refer to [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#prepare-sample-prompts) for instructions.\n", "\n", - "For prompt translation:\n", - "* Consider using the source model to label examples that the target model struggles with, helping to identify areas for improvement.\n" + "Focus on examples that specifically demonstrate the issues you want to address.\n", "Recommendation: Use 50-100 distinct samples for reliable results. However, the tool can still be effective with as few as 5 samples.\n", "For prompt translation (e.g. 
3P model to Google model, PaLM 2 to Gemini):\n", + "\n", + "Consider using the source model to label examples that the target model struggles with, helping to identify areas for improvement.\n", + "When you select a source model, you don't need to provide labels for the input examples.\n", + "While the source model selection is limited to Google models, it still supports labeled inputs from non-Google models. If you wish to select a non-Google source model, you will need to provide labels for your input examples.\n" ] }, { @@ -143,9 +188,7 @@ "PROJECT_ID = \"[YOUR_PROJECT]\" # @param {type:\"string\"}\n", "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", "OUTPUT_PATH = \"[OUTPUT_PATH]\" # @param {type:\"string\"}\n", - "# @markdown * GCS path of your bucket, e.g., gs://prompt_translation_demo, used to store all artifacts.\n", - "INPUT_DATA_PATH = \"[INPUT_DATA_PATH]\" # @param {type:\"string\"}\n", - "# @markdown * Specify a GCS path for the input data, e.g., gs://prompt_translation_demo/input_data.jsonl." + "INPUT_DATA_PATH = \"[INPUT_DATA_PATH]\" # @param {type:\"string\"}" ] }, { @@ -155,7 +198,14 @@ }, "source": [ "# Step 3: Configure optimization settings\n", - "The optimization configs are defaulted to the values that are most commonly used and which we recommend using initially." + "The optimization configurations are defaulted to the values that are most commonly used, which we recommend using as the initial set-up.\n", + "\n", + "The most important settings are:\n", + "\n", + "Target Model: Which model you are trying to optimize your prompts to.\n", + "Optimization Mode: The mode in which you are trying to optimize your prompt with.\n", + "Evaluation Metrics: The evaluation metrics in which you are trying to optimize your prompts against.\n", + "Refer [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#configuration) to learn more about the different configuration settings and how to best utilize them." 
] }, { @@ -167,7 +217,6 @@ "outputs": [], "source": [ "SOURCE_MODEL = \"\" # @param [\"\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"]\n", - "# @markdown * If set, it will be used to generate ground truth responses for the input examples. This is useful to migrate the prompt from a source model.\n", "TARGET_MODEL = \"gemini-1.5-flash-001\" # @param [\"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\"]\n", "OPTIMIZATION_MODE = \"instruction_and_demo\" # @param [\"instruction\", \"demonstration\", \"instruction_and_demo\"]\n", "EVAL_METRIC = \"question_answering_correctness\" # @param [\"bleu\", \"coherence\", \"exact_match\", \"fluency\", \"groundedness\", \"text_quality\", \"verbosity\", \"rouge_1\", \"rouge_2\", \"rouge_l\", \"rouge_l_sum\", \"safety\", \"question_answering_correctness\", \"question_answering_quality\", \"summarization_quality\", \"tool_name_match\", \"tool_parameter_key_match\", \"tool_parameter_kv_match\", \"tool_call_valid\"] {type:\"string\"}" @@ -179,7 +228,8 @@ "id": "kO7fO0qTSNLs" }, "source": [ - "# Step 4: Configure advanced optimization settings [Optional]" + "# Step 4: Configure advanced optimization settings [Optional]\n", + "Refer [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#configuration) to learn more about the different configuration settings and how to best utilize them." ] }, { @@ -193,18 +243,15 @@ "# @markdown **Instruction Optimization Configs**:
\n", "NUM_INST_OPTIMIZATION_STEPS = 10 # @param {type:\"integer\"}\n", "NUM_TEMPLATES_PER_STEP = 2 # @param {type:\"integer\"}\n", - "# @markdown * Number of prompt templates generated and evaluated at each optimization step.\n", "\n", "# @markdown **Demonstration Optimization Configs**:
\n", "NUM_DEMO_OPTIMIZATION_STEPS = 10 # @param {type:\"integer\"}\n", "NUM_DEMO_PER_PROMPT = 3 # @param {type:\"integer\"}\n", - "# @markdown * Number of the demonstrations to include in each prompt.\n", "\n", "# @markdown **Model Configs**:
\n", "TARGET_MODEL_QPS = 3.0 # @param {type:\"number\"}\n", "SOURCE_MODEL_QPS = 3.0 # @param {type:\"number\"}\n", "EVAL_QPS = 3.0 # @param {type:\"number\"}\n", - "# @markdown * The QPS for calling the eval model, which is currently gemini-1.5-pro-001.\n", "\n", "# @markdown **Multi-metric Configs**:
\n", "# @markdown Use this section only if you need more than one metric for optimization. This will override the metric you picked above.\n", @@ -218,11 +265,8 @@ "\n", "# @markdown **Misc Configs**:
\n", "PLACEHOLDER_TO_VALUE = \"{}\" # @param\n", - "# @markdown * This variable is used for long prompt optimization to not optimize parts of prompt identified by placeholders. It provides a mapping from the placeholder variables to their content. See link for details.\n", "RESPONSE_MIME_TYPE = \"application/json\" # @param [\"text/plain\", \"application/json\"]\n", - "# @markdown * This variable determines the format of the output for the target model. See link for details.\n", - "TARGET_LANGUAGE = \"English\" # @param [\"English\", \"French\", \"German\", \"Hebrew\", \"Hindi\", \"Japanese\", \"Korean\", \"Portuguese\", \"Simplified Chinese\", \"Spanish\", \"Traditional Chinese\"]\n", - "# @markdown * The language of the system instruction." + "TARGET_LANGUAGE = \"English\" # @param [\"English\", \"French\", \"German\", \"Hebrew\", \"Hindi\", \"Japanese\", \"Korean\", \"Portuguese\", \"Simplified Chinese\", \"Spanish\", \"Traditional Chinese\"]" ] }, { @@ -231,7 +275,8 @@ "id": "X7Mgb0EHSSFk" }, "source": [ - "# Step 5: Run Prompt Optimizer" + "# Step 5: Run Prompt Optimizer\n", + "A progress bar will appear to let you know how long the job takes." ] }, { @@ -244,7 +289,6 @@ "source": [ "import datetime\n", "import json\n", - "import os\n", "import time\n", "\n", "from google.colab import auth\n", @@ -252,10 +296,6 @@ "timestamp = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n", "display_name = f\"pt_{timestamp}\"\n", "\n", - "in_colab_enterprise = \"GOOGLE_CLOUD_PROJECT\" in os.environ\n", - "if not in_colab_enterprise:\n", - " auth.authenticate_user()\n", - "\n", "label_enforced = vapo_lib.is_run_target_required(\n", " [\n", " EVAL_METRIC,\n", @@ -329,9 +369,10 @@ "id": "lo5mcTzwSgBP" }, "source": [ - "# Step 6: Inspect the Results\n", - "You can use the following cell to inspect all the predictions made by all the\n", - "generated templates during one or multiple VAPO runs." 
+ "# Step 6: Inspect the results\n", + "For a clearer look at the specific responses generated by each prompt template during the optimization process, use the cell below.\n", + "This will allow you to inspect all the predictions made by all the\n", + "generated templates during one or multiple vertex prompt optimizer runs.\n" ] }, { @@ -345,7 +386,6 @@ "from IPython.display import HTML, display\n", "\n", "RESULT_PATH = \"[OUTPUT_PATH]\" # @param {type:\"string\"}\n", - "# @markdown * Specify a GCS path that contains artifacts of a single or multiple VAPO runs.\n", "\n", "results_ui = vapo_lib.ResultsUI(RESULT_PATH)\n", "\n", From c0d98af4387e987005c12847192adc1ce04b46b4 Mon Sep 17 00:00:00 2001 From: nhootan <103317089+nhootan@users.noreply.github.com> Date: Tue, 24 Sep 2024 07:41:08 -0400 Subject: [PATCH 03/76] refactor: Adding document links to VAPO notebook plus small edits. (#1155) # Description # Adding cloud document missing links to VAPO notebook --------- Co-authored-by: hootan Co-authored-by: Owl Bot From fd8af8d2771ee0c919ecab321e03689b2ef698e0 Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Tue, 24 Sep 2024 08:16:50 -0400 Subject: [PATCH 04/76] refactor: Update the example model for batch predictions (#1159) # Description Update the example model for batch predictions. 
Was: gemini-1.5-flash-001 Now: gemini-1.5-flash-002 --- gemini/batch-prediction/intro_batch_prediction.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gemini/batch-prediction/intro_batch_prediction.ipynb b/gemini/batch-prediction/intro_batch_prediction.ipynb index 57c1aa0f01..dbfe70d1b0 100644 --- a/gemini/batch-prediction/intro_batch_prediction.ipynb +++ b/gemini/batch-prediction/intro_batch_prediction.ipynb @@ -266,7 +266,7 @@ "\n", "You can find a list of the Gemini models that support batch predictions in the [Multimodal models that support batch predictions](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini#multimodal_models_that_support_batch_predictions) page.\n", "\n", - "This tutorial uses the Gemini 1.5 Flash (`gemini-1.5-flash-001`) model." + "This tutorial uses the Gemini 1.5 Flash (`gemini-1.5-flash-002`) model." ] }, { @@ -277,7 +277,7 @@ }, "outputs": [], "source": [ - "MODEL_ID = \"gemini-1.5-flash-001\" # @param {type:\"string\", isTemplate: true}\n", + "MODEL_ID = \"gemini-1.5-flash-002\" # @param {type:\"string\", isTemplate: true}\n", "\n", "model = GenerativeModel(MODEL_ID)" ] From ed654552aa625d5b98959660af6dde8a74ffb171 Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:21:04 +0200 Subject: [PATCH 05/76] feat(sdk): add vapo sdk notebook (#1154) # Description This notebook demostrates how to leverage Vertex AI Prompt Optimizer to efficiently migrate a prompt template from one model to another. - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. 
- [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- ..._with_vertex_ai_prompt_optimizer_sdk.ipynb | 1148 +++++++++++++++++ .../prompts/prompt_optimizer/utils/helpers.py | 357 +++++ 2 files changed, 1505 insertions(+) create mode 100644 gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb create mode 100644 gemini/prompts/prompt_optimizer/utils/helpers.py diff --git a/gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb b/gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb new file mode 100644 index 0000000000..98526b6428 --- /dev/null +++ b/gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb @@ -0,0 +1,1148 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Get started with Vertex Prompt Optimizer Notebook SDK (Preview)\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## I. Overview\n", + "\n", + "In the context of developing Generative AI (Gen AI) applications, prompt engineering poses challenges due to its time-consuming and error-prone nature. You often dedicate significant effort to crafting and inputting prompts to achieve successful task completion. Additionally, with the frequent release of foundational models, you face the additional burden of migrating working prompts from one model version to another.\n", + "\n", + "Vertex AI Prompt Optimizer aims to alleviate these challenges by providing you with an intelligent prompt optimization tool. With this tool you can both refine optimize system instruction (and task) in the prompts and selects the best demonstrations (few-shot examples) for prompt templates, empowering you to shape LLM responses from any source model to on a target Google model.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4HKyj5KwYePX" + }, + "source": [ + "### Objective\n", + "\n", + "This notebook demostrates how to leverage Vertex AI Prompt Optimizer (Preview) to efficiently migrate a prompt template from one model to another. 
The goal is to use Vertex AI Prompt Optimizer (Preview) to find the new prompt template which generates the most correct and grounded responses.\n", + "\n", + "\n", + "This tutorial uses the following Google Cloud ML services and resources:\n", + "\n", + "- Vertex Gen AI\n", + "- Vertex AI Prompt Optimizer (Preview)\n", + "- Vertex AI Model Eval\n", + "- Vertex AI Custom job\n", + "\n", + "The steps performed include:\n", + "\n", + "- Prepare the prompt-ground truth pairs optimized for another model\n", + "- Define the prompt template you want to optimize\n", + "- Set target model and evaluation metric\n", + "- Set optimization mode and steps\n", + "- Run the automatic prompt optimization job\n", + "- Collect the best prompt template and eval metric\n", + "- Validate the best prompt template" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "08d289fa873f" + }, + "source": [ + "### Dataset\n", + "\n", + "The dataset is a question-answering dataset generated by a simple AI cooking assistant that provides suggestions on how to cook healthier dishes.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aed92deeb4a0" + }, + "source": [ + "### Costs\n", + "\n", + "This tutorial uses billable components of Google Cloud:\n", + "\n", + "* Vertex AI\n", + "* Cloud Storage\n", + "\n", + "Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## II. 
Before you start" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK for Python and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "! pip3 install --upgrade --quiet 'google-cloud-aiplatform[evaluation]'\n", + "! pip3 install --upgrade --quiet 'plotly'\n", + "! pip3 install --upgrade --quiet 'asyncio' 'tqdm' 'tenacity' 'etils' 'importlib_resources' 'fsspec' 'gcsfs' 'nbformat>=4.2.0'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime (Colab only)\n", + "\n", + "To use the newly installed packages, you must restart the runtime on Google Colab." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " import IPython\n", + "\n", + " app = IPython.Application.instance()\n", + " app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "Authenticate your environment on Google Colab.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WReHDGG5g0XY" + }, + "source": [ + "#### Set your project ID and project number" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oM1iC_MfAts1" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", + "\n", + "# Set the project id\n", + "! gcloud config set project {PROJECT_ID}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oZpm-sL8f1z_" + }, + "outputs": [], + "source": [ + "PROJECT_NUMBER = \"[your-project-number]\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "region" + }, + "source": [ + "#### Region\n", + "\n", + "You can also change the `REGION` variable used by Vertex AI. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "I6FmBV2_0fBP" + }, + "outputs": [], + "source": [ + "REGION = \"us-central1\" # @param {type: \"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zgPO1eR3CYjk" + }, + "source": [ + "#### Create a Cloud Storage bucket\n", + "\n", + "Create a storage bucket to store intermediate artifacts such as datasets." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MzGDU7TWdts_" + }, + "outputs": [], + "source": [ + "BUCKET_NAME = \"your-bucket-name-{PROJECT_ID}-unique\" # @param {type:\"string\"}\n", + "\n", + "BUCKET_URI = f\"gs://{BUCKET_NAME}\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NIq7R4HZCfIc" + }, + "outputs": [], + "source": [ + "! gsutil mb -l {REGION} -p {PROJECT_ID} {BUCKET_URI}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "set_service_account" + }, + "source": [ + "#### Service Account and permissions\n", + "\n", + "Vertex AI Automated Prompt Design requires a service account with the following permissions:\n", + "\n", + "- `Vertex AI User` to call Vertex LLM API\n", + "- `Storage Object Admin` to read and write to your GCS bucket.\n", + "- `Artifact Registry Reader` to download the pipeline template from Artifact Registry.\n", + "\n", + "[Check out the documentation](https://cloud.google.com/iam/docs/manage-access-service-accounts#iam-view-access-sa-gcloud) to know how to grant those permissions to a single service account.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ssUJJqXJJHgC" + }, + "outputs": [], + "source": [ + "SERVICE_ACCOUNT = f\"{PROJECT_NUMBER}-compute@developer.gserviceaccount.com\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wqOHg5aid6HP" + }, + "outputs": [], + "source": [ + "! 
gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", + " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", + " --role=roles/aiplatform.user\n", + "\n", + "! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", + " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", + " --role=roles/storage.objectAdmin\n", + "\n", + "! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", + " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", + " --role=roles/artifactregistry.reader" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ek1-iTbPjzdJ" + }, + "source": [ + "### Set tutorial folder and workspace\n", + "\n", + "Set a folder to collect data and any tutorial artifacts." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BbfKRabXj3la" + }, + "outputs": [], + "source": [ + "from pathlib import Path as path\n", + "\n", + "ROOT_PATH = path.cwd()\n", + "TUTORIAL_PATH = ROOT_PATH / \"tutorial\"\n", + "CONFIG_PATH = TUTORIAL_PATH / \"config\"\n", + "TUNED_PROMPT_PATH = TUTORIAL_PATH / \"tuned_prompts\"\n", + "\n", + "TUTORIAL_PATH.mkdir(parents=True, exist_ok=True)\n", + "CONFIG_PATH.mkdir(parents=True, exist_ok=True)\n", + "TUNED_PROMPT_PATH.mkdir(parents=True, exist_ok=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BaNdfftpXTIX" + }, + "source": [ + "Set the associated workspace on Cloud Storage bucket." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "joJPc3FmX1fk" + }, + "outputs": [], + "source": [ + "from etils import epath\n", + "\n", + "WORKSPACE_URI = epath.Path(BUCKET_URI) / \"prompt_migration_gemini\"\n", + "INPUT_DATA_URI = epath.Path(WORKSPACE_URI) / \"data\"\n", + "\n", + "WORKSPACE_URI.mkdir(parents=True, exist_ok=True)\n", + "INPUT_DATA_URI.mkdir(parents=True, exist_ok=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "960505627ddf" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PyQmSRbKA8r-" + }, + "outputs": [], + "source": [ + "# Tutorial\n", + "from argparse import Namespace\n", + "import json\n", + "\n", + "# General\n", + "import logging\n", + "import warnings\n", + "\n", + "from google.cloud import aiplatform\n", + "import pandas as pd\n", + "from utils.helpers import (\n", + " async_generate,\n", + " display_eval_report,\n", + " evaluate_task,\n", + " get_id,\n", + " get_optimization_result,\n", + " get_results_file_uris,\n", + " init_new_model,\n", + " plot_eval_metrics,\n", + " print_df_rows,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "820DIvw1o8tB" + }, + "source": [ + "### Libraries settings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HKc4ZdUBo_SM" + }, + "outputs": [], + "source": [ + "warnings.filterwarnings(\"ignore\")\n", + "logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.ERROR)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "init_aip:mbsdk,all" + }, + "source": [ + "### Initialize Vertex AI SDK for Python\n", + "\n", + "Initialize the Vertex AI SDK for Python for your project." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bQMc2Uwf0fBQ" + }, + "outputs": [], + "source": [ + "aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gxc7q4r-DFH4" + }, + "source": [ + "### Define constants" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0Y5t67f3DHNm" + }, + "outputs": [], + "source": [ + "INPUT_DATA_FILE_URI = \"gs://github-repo/prompts/prompt_optimizer/rag_qa_dataset.jsonl\"\n", + "\n", + "EXPERIMENT_NAME = \"qa-prompt-eval\"\n", + "INPUT_TUNING_DATA_URI = epath.Path(WORKSPACE_URI) / \"tuning_data\"\n", + "INPUT_TUNING_DATA_FILE_URI = str(INPUT_DATA_URI / \"prompt_tuning.jsonl\")\n", + "OUTPUT_TUNING_DATA_URI = epath.Path(WORKSPACE_URI) / \"tuned_prompt\"\n", + "APD_CONTAINER_URI = (\n", + " \"us-docker.pkg.dev/vertex-ai-restricted/builtin-algorithm/apd:preview_v1_0\"\n", + ")\n", + "CONFIG_FILE_URI = str(WORKSPACE_URI / \"config\" / \"config.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EdvJRUWRNGHE" + }, + "source": [ + "## III. Automated prompt design with Vertex AI Prompt Optimizer (Preview)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mmTotjRAJplw" + }, + "source": [ + "### Load the dataset\n", + "\n", + "Load the dataset from Cloud Storage bucket." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LA7aG08wJtVm" + }, + "outputs": [], + "source": [ + "prompt_tuning_df = pd.read_json(INPUT_DATA_FILE_URI, lines=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1xn-pz3v5HVK" + }, + "outputs": [], + "source": [ + "prompt_tuning_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PsXdJBJXiaVH" + }, + "outputs": [], + "source": [ + "print_df_rows(prompt_tuning_df, n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E5SmBApC-WDg" + }, + "source": [ + "### Evaluate the previous model version in question-answering task\n", + "\n", + "Run an evaluation using Vertex AI Gen AI Evaluation Service to define question-answering baseline." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qA-dl76E-H23" + }, + "outputs": [], + "source": [ + "evaluation_qa_results = [\n", + " (\n", + " \"qa_eval_result_old_model\",\n", + " evaluate_task(\n", + " df=prompt_tuning_df,\n", + " prompt_col=\"prompt\",\n", + " reference_col=\"reference\",\n", + " response_col=\"answer\",\n", + " experiment_name=EXPERIMENT_NAME,\n", + " eval_metrics=[\"question_answering_quality\", \"groundedness\"],\n", + " eval_sample_n=len(prompt_tuning_df),\n", + " ),\n", + " )\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_9ZMmVHZfl5O" + }, + "source": [ + "Plot the evaluation metrics." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yTZKlgOk-0qz" + }, + "outputs": [], + "source": [ + "plot_eval_metrics(evaluation_qa_results)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Rp1n1aMACzSW" + }, + "source": [ + "### Translate the prompt template with Vertex AI Prompt Optimizer (Preview)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "h1650lf3X8xW" + }, + "source": [ + "#### Prepare the prompt template you want to optimize\n", + "\n", + "A prompt consists of two key parts:\n", + "\n", + "* **System Instruction Template** which is a fixed part of the prompt shared across all queries for a given task.\n", + "\n", + "* **Prompt Template** which is a dynamic part of the prompt that changes based on the task.\n", + "\n", + "Vertex AI Prompt Optimizer enables the translation and optimization of the Instruction Template, while the Task/Context Template remains essential for evaluating different instruction templates.\n", + "\n", + "In this case, you want to translate a prompt\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Db8rHNC6DmtY" + }, + "outputs": [], + "source": [ + "SYSTEM_INSTRUCTION_TEMPLATE = \"\"\"\n", + "Given a question with some context, provide the correct answer to the question.\n", + "\"\"\"\n", + "\n", + "PROMPT_TEMPLATE = \"\"\"\n", + "Some examples of correct answer to a question with context are:\n", + "Question: {{question}}\n", + "Answer: {{target}}\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a1TCgXsrXztm" + }, + "source": [ + "#### Prepare few samples\n", + "\n", + "Vertex AI Prompt optimizer requires a CSV or JSONL file containing labeled samples.\n", + "\n", + "For **prompt optimization**:\n", + "\n", + "* Focus on examples that specifically demonstrate the issues you want to address.\n", + "* Recommendation: Use 50-100 distinct samples for reliable results. 
However, the tool can still be effective with as few as 5 samples.\n", + "\n", + "For **prompt translation**:\n", + "\n", + "* Consider using the source model to label examples that the target model struggles with, helping to identify areas for improvement.\n", + "\n", + "Learn more about setting up your CSV or JSONL file as input [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vTIl_v9Ig1F-" + }, + "outputs": [], + "source": [ + "prepared_prompt_tuning_df = prompt_tuning_df.copy()\n", + "\n", + "# Prepare question and target columns\n", + "prepared_prompt_tuning_df[\"question\"] = (\n", + " prepared_prompt_tuning_df[\"user_question\"]\n", + " + \"\\nnContext:\\n\"\n", + " + prepared_prompt_tuning_df[\"context\"]\n", + ")\n", + "prepared_prompt_tuning_df = prepared_prompt_tuning_df.rename(\n", + " columns={\"reference\": \"target\"}\n", + ")\n", + "\n", + "# Remove uneccessary columns\n", + "prepared_prompt_tuning_df = prepared_prompt_tuning_df.drop(\n", + " columns=[\"user_question\", \"context\", \"prompt\", \"answer\"]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_DUFEAb82eEi" + }, + "outputs": [], + "source": [ + "prepared_prompt_tuning_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nF3XY_d_yB-K" + }, + "source": [ + "#### Upload samples to bucket\n", + "\n", + "Once you prepare samples, you can upload them on Cloud Storage bucket." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "155paLgGUXOm" + }, + "outputs": [], + "source": [ + "prepared_prompt_tuning_df.to_json(\n", + " INPUT_TUNING_DATA_FILE_URI, orient=\"records\", lines=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F5RD0l2xX-FI" + }, + "source": [ + "#### Configure optimization settings\n", + "\n", + "Vertex AI Prompt Optimizer allows you to optimize prompts by optimizing instructions only, demonstration only, or both (`optimization_mode`), and after you set the system instruction, prompt templates that will be optimized (`system_instruction`, `prompt_template`), and the model you want to optimize for (`target_model`), it allows to condition the optimization process by setting metrics, number of iterations used to improve the prompt and more.\n", + "\n", + "Below you have some configurations as default that are most commonly used and recommended. And if you want to have more control of the optimization process, Vertex AI Prompt Optimizer (Preview) provides also additional configurations. 
Refer [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer) to learn more about the different parameters settings and how to best utilize them.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sFHutXhgeqRx" + }, + "outputs": [], + "source": [ + "PROMPT_OPTIMIZATION_JOB = \"auto-prompt-design-job-\" + get_id()\n", + "OUTPUT_TUNING_RUN_URI = str(OUTPUT_TUNING_DATA_URI / PROMPT_OPTIMIZATION_JOB)\n", + "\n", + "args = Namespace(\n", + " # Basic configuration\n", + " system_instruction=SYSTEM_INSTRUCTION_TEMPLATE,\n", + " prompt_template=PROMPT_TEMPLATE,\n", + " target_model=\"gemini-1.5-flash-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", + " optimization_mode=\"instruction\", # Supported modes: \"instruction\", \"demonstration\", \"instruction_and_demo\"\n", + " num_steps=3,\n", + " num_template_eval_per_step=2,\n", + " num_demo_set_candidates=3,\n", + " demo_set_size=2,\n", + " input_data_path=INPUT_TUNING_DATA_FILE_URI,\n", + " output_path=OUTPUT_TUNING_RUN_URI,\n", + " project=PROJECT_ID,\n", + " # Advanced configuration\n", + " target_model_qps=1,\n", + " target_model_location=\"us-central1\",\n", + " source_model=\"\",\n", + " source_model_qps=\"\",\n", + " source_model_location=\"\",\n", + " eval_model=\"gemini-1.5-pro-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", + " eval_qps=1,\n", + " eval_model_location=\"us-central1\",\n", + " eval_metrics_types=[\n", + " \"question_answering_correctness\",\n", + " \"groundedness\",\n", + " ], # Supported metrics: \"bleu\", \"coherence\", \"exact_match\", 
\"fluidity\", \"fulfillment\", \"groundedness\", \"rouge_1\", \"rouge_2\", \"rouge_l\", \"rouge_l_sum\", \"safety\", \"question_answering_correctness\", \"question_answering_helpfulness\", \"question_answering_quality\", \"question_answering_relevance\", \"summarization_helpfulness\", \"summarization_quality\", \"summarization_verbosity\", \"tool_name_match\", \"tool_parameter_key_match\", \"tool_parameter_kv_match\"\n", + " eval_metrics_weights=[0.9, 0.1],\n", + " aggregation_type=\"weighted_sum\", # Supported aggregation types: \"weighted_sum\", \"weighted_average\"\n", + " data_limit=50,\n", + " response_mime_type=\"application/json\",\n", + " language=\"English\", # Supported languages: \"English\", \"French\", \"German\", \"Hebrew\", \"Hindi\", \"Japanese\", \"Korean\", \"Portuguese\", \"Simplified Chinese\", \"Spanish\", \"Traditional Chinese\"\n", + " placeholder_to_content=json.loads(\"{}\"),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Jd_uzQYQx6L7" + }, + "source": [ + "#### Upload Vertex AI Prompt Optimizer (Preview) config to Cloud Storage\n", + "\n", + "After you define Vertex AI Prompt Optimizer (Preview) configuration, you upload them on Cloud Storage bucket.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QCJAqcfWBqAh" + }, + "source": [ + "Now you can save the config to the bucket." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iqiv8ApR_SAM" + }, + "outputs": [], + "source": [ + "args = vars(args)\n", + "\n", + "with epath.Path(CONFIG_FILE_URI).open(\"w\") as config_file:\n", + " json.dump(args, config_file)\n", + "config_file.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "spqgBT8hYAle" + }, + "source": [ + "#### Run the automatic prompt optimization job\n", + "\n", + "Now you are ready to run your first Vertex AI Prompt Optimizer (Preview) job using the Vertex AI SDK for Python.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GtPnvKIpUQ3q" + }, + "outputs": [], + "source": [ + "WORKER_POOL_SPECS = [\n", + " {\n", + " \"machine_spec\": {\n", + " \"machine_type\": \"n1-standard-4\",\n", + " },\n", + " \"replica_count\": 1,\n", + " \"container_spec\": {\n", + " \"image_uri\": APD_CONTAINER_URI,\n", + " \"args\": [\"--config=\" + CONFIG_FILE_URI],\n", + " },\n", + " }\n", + "]\n", + "\n", + "custom_job = aiplatform.CustomJob(\n", + " display_name=PROMPT_OPTIMIZATION_JOB,\n", + " worker_pool_specs=WORKER_POOL_SPECS,\n", + ")\n", + "\n", + "custom_job.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3YwwKBhtJ4ut" + }, + "source": [ + "### Collect the optimization results\n", + "\n", + "Vertex AI Prompt Optimizer returns both optimized templates and evaluation results for either instruction, or demostrations, or both depending on the optimization mode you define as JSONL files on Cloud Storage bucket. 
Those results help you understand the optimization process.\n", + "\n", + "In this case, you want to collect the optimized templates and evaluation results for the instruction.\n", + "\n", + "Below you use a helper function to read those results.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xTPJsvg-kzkO" + }, + "outputs": [], + "source": [ + "apd_result_uris = get_results_file_uris(\n", + " output_uri=OUTPUT_TUNING_RUN_URI,\n", + " required_files=[\"eval_results.json\", \"templates.json\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZezzQSYWjYPd" + }, + "source": [ + "#### Get the best system instruction\n", + "\n", + "Below you have the optimal system instruction template and the associated evaluation metrics." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PrezXkBUu1s5" + }, + "outputs": [], + "source": [ + "best_prompt_df, prompt_summary_df, prompt_metrics_df = get_optimization_result(\n", + " apd_result_uris[\"instruction_templates\"],\n", + " apd_result_uris[\"instruction_eval_results\"],\n", + ")\n", + "\n", + "display_eval_report(\n", + " (best_prompt_df, prompt_summary_df, prompt_metrics_df),\n", + " prompt_component=\"instruction\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TrMrbcA5Gzep" + }, + "source": [ + "### Validate and Evaluate the optimized template in question-answering task\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bGRELw3U3I28" + }, + "source": [ + "#### Generate new responses using the optimized template\n", + "\n", + "Finally, you generate the new responses with the optimized template. Below you can see an example of a generated response using the optimized system instructions template." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GXDU_ydAG5ak" + }, + "outputs": [], + "source": [ + "optimized_prompt_template = (\n", + " best_prompt_df[\"prompt\"].iloc[0]\n", + " + \"\\nQuestion: \\n{question}\"\n", + " + \"\\nContext: \\n{context}\"\n", + ")\n", + "\n", + "optimized_prompts = [\n", + " optimized_prompt_template.format(question=q, context=c)\n", + " for q, c in zip(\n", + " prompt_tuning_df[\"user_question\"].to_list(),\n", + " prompt_tuning_df[\"context\"].to_list(),\n", + " )\n", + "]\n", + "\n", + "prompt_tuning_df[\"optimized_prompt_with_vapo\"] = optimized_prompts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qG6QJW8alttS" + }, + "outputs": [], + "source": [ + "gemini_llm = init_new_model(\"gemini-1.5-flash-001\")\n", + "\n", + "gemini_predictions = [async_generate(p, model=gemini_llm) for p in optimized_prompts]\n", + "\n", + "gemini_predictions_col = await tqdm_asyncio.gather(*gemini_predictions)\n", + "\n", + "prompt_tuning_df[\"gemini_answer_with_vapo\"] = gemini_predictions_col" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_55cHbD4kFAz" + }, + "outputs": [], + "source": [ + "print_df_rows(prompt_tuning_df, n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "D1wxiPhv21TT" + }, + "source": [ + "#### Evaluate new responses using Vertex AI Gen AI Evaluation\n", + "\n", + "And you use the generated responses with the optimized prompt to run a new round of evaluation with Vertex AI Gen AI Evaluation.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5Ebtvk0fKApV" + }, + "outputs": [], + "source": [ + "evaluation_qa_results.append(\n", + " (\n", + " \"qa_eval_result_new_model_with_vapo\",\n", + " evaluate_task(\n", + " df=prompt_tuning_df,\n", + " prompt_col=\"optimized_prompt_with_vapo\",\n", + " reference_col=\"reference\",\n", + " 
response_col=\"gemini_answer_with_vapo\",\n", + " experiment_name=EXPERIMENT_NAME,\n", + " eval_metrics=[\"question_answering_quality\", \"groundedness\"],\n", + " eval_sample_n=len(prompt_tuning_df),\n", + " ),\n", + " )\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wJXNAnJjmnga" + }, + "outputs": [], + "source": [ + "plot_eval_metrics(evaluation_qa_results)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2a4e033321ad" + }, + "source": [ + "## IV. Clean up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WRY_3wh1GVNm" + }, + "outputs": [], + "source": [ + "delete_bucket = False\n", + "delete_job = False\n", + "delete_experiment = False\n", + "delete_tutorial = False\n", + "\n", + "if delete_bucket:\n", + " ! gsutil rm -r $BUCKET_URI\n", + "\n", + "if delete_job:\n", + " custom_job.delete()\n", + "\n", + "if delete_experiment:\n", + " experiment = aiplatform.Experiment(experiment_name=EXPERIMENT_NAME)\n", + " experiment.delete()\n", + "\n", + "if delete_tutorial:\n", + " import shutil\n", + "\n", + " shutil.rmtree(str(TUTORIAL_PATH))" + ] + } + ], + "metadata": { + "colab": { + "name": "get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/gemini/prompts/prompt_optimizer/utils/helpers.py b/gemini/prompts/prompt_optimizer/utils/helpers.py new file mode 100644 index 0000000000..c20b10269c --- /dev/null +++ b/gemini/prompts/prompt_optimizer/utils/helpers.py @@ -0,0 +1,357 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import random +import string +from typing import Dict, List, Optional, Tuple, Union + +from IPython.display import HTML, Markdown, display +from etils import epath +import pandas as pd +import plotly.graph_objects as go +from tenacity import retry, wait_random_exponential +from vertexai import generative_models +from vertexai.evaluation import EvalTask +from vertexai.generative_models import GenerativeModel + +METRICS = [ + "bleu", + "coherence", + "exact_match", + "fluidity", + "fulfillment", + "groundedness", + "rouge_1", + "rouge_2", + "rouge_l", + "rouge_l_sum", + "safety", + "question_answering_correctness", + "question_answering_helpfulness", + "question_answering_quality", + "question_answering_relevance", + "summarization_helpfulness", + "summarization_quality", + "summarization_verbosity", + "tool_name_match", + "tool_parameter_key_match", + "tool_parameter_kv_match", +] +COMPOSITE_METRIC = "composite_metric" + + +def get_id(length: Union[int, None] = 8) -> str: + """Generate a uuid of a specified length (default=8).""" + if length is None: + length = 8 + return "".join(random.choices(string.ascii_lowercase + string.digits, k=length)) + + +@retry(wait=wait_random_exponential(multiplier=1, max=120)) +async def async_generate(prompt: str, model: GenerativeModel) -> Union[str, None]: + """Generate a response from the model.""" + response = await model.generate_content_async( + [prompt], + stream=False, + ) + return response.text[0] if response.text else None + + +def evaluate_task( + df: pd.DataFrame, + prompt_col: str, + 
reference_col: str, + response_col: str, + experiment_name: str, + eval_metrics: List[str], + eval_sample_n: int, +) -> Dict[str, float]: + """Evaluate task using Vertex AI Evaluation.""" + + # Generate a unique id for the experiment run + id = get_id() + + # Rename the columns to match the expected format + eval_dataset = df[[prompt_col, reference_col, response_col]].rename( + columns={ + prompt_col: "prompt", + reference_col: "reference", + response_col: "response", + } + ) + + # Drop rows with missing values + eval_dataset = eval_dataset.dropna() + + # Sample a subset of the dataset + eval_dataset = eval_dataset.sample(n=eval_sample_n, random_state=8).reset_index( + drop=True + ) + + # Create an EvalTask object + eval_task = EvalTask( + dataset=eval_dataset, + metrics=eval_metrics, + experiment=experiment_name, + ) + + # Evaluate the task + result = eval_task.evaluate(experiment_run_name=f"{experiment_name}-{id}") + + # Return the summary metrics + return result.summary_metrics + + +def print_df_rows( + df: pd.DataFrame, columns: Optional[List[str]] = None, n: int = 3 +) -> None: + """Print a subset of rows from a DataFrame.""" + + # Define the base style for the text + base_style = ( + "white-space: pre-wrap; width: 800px; overflow-x: auto; font-size: 16px;" + ) + + # Define the header style for the text + header_style = ( + "white-space: pre-wrap; width: 800px; overflow-x: auto; font-size: 16px;" + ) + + # If columns are specified, filter the DataFrame + if columns: + df = df[columns] + + # Initialize the counter for printed samples + printed_samples = 0 + + # Iterate over the rows of the DataFrame + for _, row in df.iterrows(): + for field in df.columns: + display(HTML(f"{field.capitalize()}:")) + display(HTML("
")) + value = row[field] + display(HTML(f"{value}")) + display(HTML("
")) + + printed_samples += 1 + if printed_samples >= n: + break + + +def init_new_model(model_name: str) -> GenerativeModel: + """Initialize a new model.""" + + # Initialize the model + model = GenerativeModel( + model_name=model_name, + generation_config={ + "candidate_count": 1, + "max_output_tokens": 2048, + "temperature": 0.5, + }, + safety_settings={ + generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_NONE, + generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_NONE, + generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_NONE, + generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_NONE, + }, + ) + return model + + +def plot_eval_metrics( + eval_results: List[tuple[str, Dict[str, float]]], + metrics: Optional[List[str]] = None, +) -> None: + """Plot a bar plot for the evaluation results.""" + + # Create data for the bar plot + data = [] + for eval_result in eval_results: + title, summary_metrics = eval_result + if metrics: + summary_metrics = { + k: summary_metrics[k] + for k, v in summary_metrics.items() + if any(selected_metric in k for selected_metric in metrics) + } + + summary_metrics = {k: v for k, v in summary_metrics.items() if "mean" in k} + data.append( + go.Bar( + x=list(summary_metrics.keys()), + y=list(summary_metrics.values()), + name=title, + ) + ) + + # Update the figure with the data + fig = go.Figure(data=data) + + # Add the title + fig.update_layout( + title=go.layout.Title(text="Evaluation Metrics", x=0.5), + xaxis_title="Metric Name", + yaxis_title="Mean Value", + ) + + # Change the bar mode + fig.update_layout(barmode="group") + + # Show the plot + fig.show() + + +def get_results_file_uris( + output_uri: str, required_files: List[str] = ["eval_results.json", "templates.json"] +) -> Dict[str, str]: + """Finds directories containing 
specific files under the given full GCS path.""" + + # Create a path object for the given output URI + path = epath.Path(output_uri) + + # Initialize a dictionary to store the results file URIs + results_file_uris: Dict[str, str] = {} + + # Iterate over the directories and files in the path + for directory in path.iterdir(): + for file in directory.iterdir(): + if file.name in required_files: + file_key = directory.name + "_" + file.stem + results_file_uris[file_key] = str(directory / file) + + # Return the results file URIs + return results_file_uris + + +def get_best_template(template_uri: str) -> pd.DataFrame: + """Retrieves and processes the best template.""" + + # Load templates from the URI + with epath.Path(template_uri).open() as f: + templates = json.load(f) + + # Process metrics for each template + for template in templates: + template["metrics"] = { + key.split("/")[0]: value for key, value in template["metrics"].items() + } + + # Sort templates based on composite metric or highest metric value + if any(template["metrics"].get(COMPOSITE_METRIC) for template in templates): + sorted_templates = sorted( + templates, key=lambda x: x["metrics"][COMPOSITE_METRIC], reverse=True + ) + elif any( + metric in template["metrics"] for template in templates for metric in METRICS + ): + sorted_metrics = sorted( + templates, key=lambda x: max(x["metrics"].values()), reverse=True + ) + top_metric = list(sorted_metrics[0]["metrics"].keys())[0] + sorted_templates = sorted( + templates, key=lambda x: x["metrics"][top_metric], reverse=True + ) + else: + raise ValueError("No valid metrics found in templates.") + + # Create a DataFrame with the best template and metrics + best_template_df = pd.DataFrame([sorted_templates[0]]) + + # Add metrics as separate columns + for metric in best_template_df["metrics"].iloc[0]: + best_template_df[f"metrics_{metric}"] = best_template_df["metrics"].apply( + lambda x: x[metric] + ) + + # Drop the 'metrics' column + best_template_df = 
best_template_df.drop("metrics", axis=1) + + return best_template_df + + +def get_best_evaluation( + best_template_df: pd.DataFrame, eval_result_uri: str +) -> Tuple[pd.DataFrame, pd.DataFrame]: + """Retrieves and processes the best evaluation.""" + + # Load the evaluations from the URI + with epath.Path(eval_result_uri).open() as f: + evaluations = json.load(f) + + # Get the best index from the best template DataFrame + best_index = best_template_df["step"].iloc[0] + + # Retrieve the best evaluation based on the index + best_evaluation: Dict = evaluations[best_index] + + # Create a DataFrame from the summary results + summary_df = pd.DataFrame([best_evaluation["summary_results"]]) + + # Load the metrics table from the best evaluation + metrics_table = json.loads(best_evaluation["metrics_table"]) + + # Create a DataFrame from the metrics table + metrics_df = pd.DataFrame(metrics_table) + + return summary_df, metrics_df + + +def get_optimization_result( + template_uri: str, eval_result_uri: str +) -> Union[Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame], None]: + """Retrieves and processes the best template and evaluation results.""" + + # Get the best template DataFrame + best_template_df = get_best_template(template_uri) + + # Get the summary and metrics DataFrames for the best evaluation + summary_df, metrics_df = get_best_evaluation(best_template_df, eval_result_uri) + + return best_template_df, summary_df, metrics_df + + +def display_eval_report( + eval_result: Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]], + prompt_component: str = "instruction", +) -> None: + """Displays evaluation results with optional filtering by metrics.""" + + # Unpack the evaluation result + best_template_df, summary_df, metrics_df = eval_result + + # Display the report title + display(Markdown("## Vertex AI Prompt Optimizer - Report")) + + # Display the prompt component title + if prompt_component == "instruction": + display(Markdown("### Best Instruction")) + 
elif prompt_component == "demonstration": + display(Markdown("### Best Demonstration")) + else: + raise ValueError( + "Invalid prompt_component value. Must be 'instruction' or 'demonstration'." + ) + + # Display the best template DataFrame + display(best_template_df.style.hide(axis="index")) + + # Display the summary metrics title + display(Markdown("### Summary Metrics")) + display(summary_df.style.hide(axis="index")) + + # Display the report metrics title + display(Markdown("### Report Metrics")) + display(metrics_df.style.hide(axis="index")) From 518e6c9ed33bf49d0dd0e6f4a6502cdec8d487b7 Mon Sep 17 00:00:00 2001 From: Alok Pattani <51244947+alokpattani@users.noreply.github.com> Date: Tue, 24 Sep 2024 08:31:29 -0700 Subject: [PATCH 06/76] feat: Adding YouTube video analysis notebook (#1161) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ ] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --------- Co-authored-by: Owl Bot Co-authored-by: Eric Dong --- .../youtube_video_analysis.ipynb | 1409 +++++++++++++++++ 1 file changed, 1409 insertions(+) create mode 100644 gemini/use-cases/video-analysis/youtube_video_analysis.ipynb diff --git a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb new file mode 100644 index 0000000000..500437a464 --- /dev/null +++ b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb @@ -0,0 +1,1409 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# YouTube Video Analysis with Gemini\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Alok Pattani](https://github.com/alokpattani/) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## Overview\n", + "\n", + "In this notebook, you'll explore how to do direct analysis of publicly available [YouTube](https://www.youtube.com/) videos with Gemini.\n", + "\n", + "You will complete the following tasks:\n", + "- Summarizing a single YouTube video using Gemini 1.5 Flash\n", + "- Extracting a specific set of structured outputs from a longer YouTube video using Gemini 1.5 Pro and controlled generation\n", + "- Creating insights from analyzing multiple YouTube videos together using asynchronous generation with Gemini" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install --upgrade --user --quiet google-cloud-aiplatform itables" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'status': 'ok', 'restart': True}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "# Use the environment variable if the user doesn't provide Project ID.\n", + "import os\n", + "\n", + "import vertexai\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EdvJRUWRNGHE" + }, + "source": [ + "## Set up libraries, options, and models" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "import json\n", + "import time\n", + "\n", + "from IPython.display import HTML, Markdown, display\n", + "from itables import show\n", + "import itables.options as itable_opts\n", + "import pandas as pd\n", + "from tenacity import retry, stop_after_attempt, wait_random_exponential\n", + "from vertexai.generative_models import GenerationConfig, GenerativeModel, Part" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f86c665a5d94" + }, + "source": [ + "### Configure some notebook options" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "4730b9f09e1e" + }, + "outputs": [], + "source": [ + "# Configure some options related to interactive tables\n", + "itable_opts.maxBytes = 1e9\n", + "itable_opts.maxColumns = 50\n", + "\n", + "itable_opts.order = []\n", + "itable_opts.column_filters = \"header\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e43229f3ad4f" + }, + "source": [ + "### Load models" + ] + 
}, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "cf93d5f0ce00" + }, + "outputs": [], + "source": [ + "# Set Gemini Flash and Pro models to be used in this notebook\n", + "GEMINI_FLASH_MODEL_ID = \"gemini-1.5-flash-002\"\n", + "GEMINI_PRO_MODEL_ID = \"gemini-1.5-pro-002\"\n", + "\n", + "gemini_flash_model = GenerativeModel(GEMINI_FLASH_MODEL_ID)\n", + "gemini_pro_model = GenerativeModel(GEMINI_PRO_MODEL_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "109111fae02c" + }, + "source": [ + "## Summarize a YouTube video" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1fe7e2663c3a" + }, + "source": [ + "Provide a link to a public YouTube video that you'd like to summarize. Ensure that the video is less than an hour long (if using Gemini 1.5 Flash, as is shown below; can try up to a 2-hour video with Gemini 1.5 Pro) to make sure it fits in the context window.\n", + "\n", + "The default content to be summarized is [this 6.5-minute video showing how Major League Baseball (MLB) analyzes data using Google Cloud](https://www.youtube.com/watch?v=O_W_VGUeHVI)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "5c8a32e14eec" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Provide link to a public YouTube video to summarize\n", + "YOUTUBE_VIDEO_URL = (\n", + " \"https://www.youtube.com/watch?v=O_W_VGUeHVI\" # @param {type:\"string\"}\n", + ")\n", + "\n", + "youtube_video_embed_url = YOUTUBE_VIDEO_URL.replace(\"/watch?v=\", \"/embed/\")\n", + "\n", + "# Create HTML code to directly embed video\n", + "youtube_video_embed_html_code = f\"\"\"\n", + "\n", + "\"\"\"\n", + "\n", + "# Display embedded YouTube video\n", + "display(HTML(youtube_video_embed_html_code))" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "9bd742163fc7" + }, + "outputs": [ + { + "data": { + "text/markdown": [ + "This Google Cloud promotional video explores how Major League Baseball (MLB) uses data analytics to enhance the fan experience. Priyanka Vergadia, Lead Developer Advocate at Google, interviews several MLB representatives to discuss the process. \n", + "\n", + "The MLB collects 25 million unique data points per game using Hawk-Eye cameras. This data is then processed and stored in Google Cloud using Anthos, Kubernetes Engine, Bigtable, and other technologies. This information powers MLB tools like MLB Film Room, and makes data accessible to fans and analysts. \n", + "\n", + "Rob Engel, Senior Director of Software Engineering at MLB, explains the technology that enables real-time data transfer. He discusses the use of Anthos, Kubernetes Engine, and Cloud SQL to process and store the data.\n", + "\n", + "John Kraizt, Director, Baseball Systems at Arizona Diamondbacks, explains how MLB teams use the data. 
Finally, Sarah Langs, Reporter/Researcher at MLB Advanced Media, describes how analysts use websites such as Baseball Savant and Statcast to translate the data for fan consumption." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Call Gemini API with prompt to summarize video\n", + "video_summary_prompt = \"Summarize this video.\"\n", + "\n", + "video_summary_response = gemini_flash_model.generate_content(\n", + " [video_summary_prompt, Part.from_uri(mime_type=\"video/webm\", uri=YOUTUBE_VIDEO_URL)]\n", + ")\n", + "\n", + "# Display results\n", + "display(Markdown(video_summary_response.text))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "09221a4ba6a9" + }, + "source": [ + "## Extract structured output from a YouTube video" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "db6bc26fca7d" + }, + "source": [ + "Next, we'll show how to extract structured outputs using [controlled generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output), in this case from a video that covers multiple topics.\n", + "\n", + "We’re going to see how Gemini Pro’s industry-leading 2 million token context window can help analyze [the full opening keynote](https://www.youtube.com/watch?v=V6DJYGn2SFk) from our Next conference back in April - all 1 hour and 41 minutes of it!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "fc98b36d5fc4" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Link to full Cloud Next '24 Opening Keynote video\n", + "cloud_next_keynote_video_url = \"https://www.youtube.com/watch?v=V6DJYGn2SFk\"\n", + "\n", + "# Uncomment line below to replace with 14-min keynote summary video instead (faster)\n", + "# cloud_next_keynote_video_url = \"https://www.youtube.com/watch?v=M-CzbTUVykg\"\n", + "\n", + "cloud_next_keynote_video_embed_url = cloud_next_keynote_video_url.replace(\n", + " \"/watch?v=\", \"/embed/\"\n", + ")\n", + "\n", + "# Create HTML code to directly embed video\n", + "cloud_next_keynote_youtube_video_embed_html_code = f\"\"\"\n", + "\n", + "\"\"\"\n", + "\n", + "# Display embedded YouTube video\n", + "display(HTML(cloud_next_keynote_youtube_video_embed_html_code))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e904c020d521" + }, + "source": [ + "Below is a prompt to extract the biggest product announcements that were made during this keynote. We use the response schema to show that we want valid JSON output in a particular form, including a constraint specifying that the \"product status\" field should be either GA, Preview, or Coming Soon.\n", + "\n", + "The following cell may take several minutes to run, as Gemini 1.5 Pro is analyzing all 101 minutes of the video and audio to produce comprehensive results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "d5a93cd5d2fa" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{\"name\": \"Gemini 1.5 Pro\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Gemini 1.5 Pro shows dramatically enhanced performance and includes a breakthrough in long context understanding.\"}, {\"name\": \"A3 Mega VMs\", \"product_status\": \"GA\", \"quote_from_presenter\": \"A3 Mega VMs powered by Nvidia H100 Tensor Core GPUs with twice the network bandwidth vs. A3 instances.\"}, {\"name\": \"TPU v5p\", \"product_status\": \"GA\", \"quote_from_presenter\": \"Our latest generation TPU pod consists of 8,960 chips interconnected to support the largest scale ML training and serving.\"}, {\"name\": \"NVIDIA GB200 NVL72\", \"product_status\": \"Coming Soon\", \"quote_from_presenter\": \"Nvidia's newest Grace Blackwell generation of GPUs coming to Google Cloud early in 2025.\"}, {\"name\": \"Google Axion Processors\", \"product_status\": \"Coming Soon\", \"quote_from_presenter\": \"Google's first custom Arm-based CPU designed for the data center will be available in preview later this year.\"}, {\"name\": \"Cloud Storage Fuse Caching\", \"product_status\": \"GA\", \"quote_from_presenter\": \"Cloud Storage Fuse Caching is generally available.\"}, {\"name\": \"Parallelstore Caching\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Parallelstore Caching is in preview.\"}, {\"name\": \"Hyperdisk ML\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Hyperdisk ML is our next generation block storage service optimized for AI inference and serving workloads.\"}, {\"name\": \"Dynamic Workload Scheduler\", \"product_status\": \"GA\", \"quote_from_presenter\": \"Dynamic Workload Scheduler now has two new options: Calendar Mode for start time assurance and Flex Start for optimized economics.\"}, {\"name\": \"Gemini in Threat Intelligence\", 
\"product_status\": \"Preview\", \"quote_from_presenter\": \"Gemini in Threat Intelligence is in preview.\"}, {\"name\": \"Gemini in Security Command Center\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Gemini in Security Command Center is in preview.\"}, {\"name\": \"Google Vids\", \"product_status\": \"GA\", \"quote_from_presenter\": \"Google Vids is generally available.\"}, {\"name\": \"Text-to-Live Image\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Text-to-Live Image is in preview.\"}, {\"name\": \"Digital Watermarking\", \"product_status\": \"GA\", \"quote_from_presenter\": \"Digital Watermarking is generally available.\"}, {\"name\": \"New Editing Modes in Imagen 2.0\", \"product_status\": \"GA\", \"quote_from_presenter\": \"New Editing Modes in Imagen 2.0 are generally available.\"}, {\"name\": \"Gemini in BigQuery\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Gemini in BigQuery simplifies data preparation using AI.\"}, {\"name\": \"Vector Indexing in BigQuery and AlloyDB\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Vector Indexing in BigQuery and AlloyDB allows you to query the right data in analytical and operational systems.\"}, {\"name\": \"BigQuery Data Canvas\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"BigQuery Data Canvas provides a notebook-like experience with natural language and embedded visualizations.\"}, {\"name\": \"Gemini Code Assist\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Gemini Code Assist is in preview.\"}, {\"name\": \"Vertex AI Agent Builder\", \"product_status\": \"Preview\", \"quote_from_presenter\": \"Vertex AI Agent Builder lets you create customer agents that are amazingly powerful in just three key steps.\"}] \n" + ] + } + ], + "source": [ + "# Set up pieces (prompt, response schema, config) and run video extraction\n", + "\n", + "video_extraction_prompt = (\n", + " \"Provide a summary of the biggest product 
announcements \"\n", + " \"that were made in this Google Cloud Next keynote video including:\\n\"\n", + " \" - name\\n\"\n", + " ' - product status: \"GA\" (Generally Available), \"Preview\", or \"Coming Soon\"\\n'\n", + " \" - key quote from the presenter about the product, 20 words or fewer per product\\n\\n\"\n", + " \"Make sure to look through and listen to the whole video, start to finish, to find \"\n", + " \"the top product announcements. Only reference information in the video itself in \"\n", + " \"your response.\"\n", + ")\n", + "\n", + "video_extraction_response_schema = {\n", + " \"type\": \"ARRAY\",\n", + " \"items\": {\n", + " \"type\": \"OBJECT\",\n", + " \"properties\": {\n", + " \"name\": {\"type\": \"STRING\"},\n", + " \"product_status\": {\n", + " \"type\": \"STRING\",\n", + " \"enum\": [\"GA\", \"Preview\", \"Coming Soon\"],\n", + " },\n", + " \"quote_from_presenter\": {\"type\": \"STRING\"},\n", + " },\n", + " },\n", + "}\n", + "\n", + "video_extraction_json_generation_config = GenerationConfig(\n", + " temperature=0.0,\n", + " max_output_tokens=8192,\n", + " response_mime_type=\"application/json\",\n", + " response_schema=video_extraction_response_schema,\n", + ")\n", + "\n", + "video_extraction_response = gemini_pro_model.generate_content(\n", + " [\n", + " video_extraction_prompt,\n", + " Part.from_uri(mime_type=\"video/webm\", uri=cloud_next_keynote_video_url),\n", + " ],\n", + " generation_config=video_extraction_json_generation_config,\n", + ")\n", + "\n", + "video_extraction_response_text = video_extraction_response.text\n", + "\n", + "print(video_extraction_response_text)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "b7b6aa978eb8" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
nameproduct_statusquote_from_presenter
\n", + "\n", + "
\n", + "Loading ITables v2.2.1 from the internet...\n", + "(need help?)
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Convert structured output from response to data frame for display and/or further analysis\n", + "video_extraction_response_df = pd.DataFrame(json.loads(video_extraction_response_text))\n", + "\n", + "show(video_extraction_response_df)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cfa2e8496790" + }, + "source": [ + "## Creating insights from analyzing multiple YouTube videos together" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c634255fd419" + }, + "source": [ + "### Google \"Year in Search\" videos\n", + "Now, consider expanding the problem to a more common enterprise use case: extracting information from _multiple_ YouTube videos at once.\n", + "\n", + "This time, we’ll use [Google’s “Year in Search” videos](https://about.google/intl/ALL_us/stories/year-in-search/), which summarize the questions, people, and moments that captured the world’s attention in each year. As of fall 2024, there are 14 of these videos, each 2-4 minutes in length, from [2010](https://www.youtube.com/watch?v=F0QXB5pw2qE) through [2023](https://www.youtube.com/watch?v=3KtWfp0UopM).\n", + "\n", + "We start by reading in a CSV file that has links to all the videos." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "b004061c908a" + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
yearyt_link
02023https://www.youtube.com/watch?v=3KtWfp0UopM
12022https://www.youtube.com/watch?v=4WXs3sKu41I
22021https://www.youtube.com/watch?v=EqboAI-Vk-U
32020https://www.youtube.com/watch?v=rokGy0huYEA
42019https://www.youtube.com/watch?v=ZRCdORJiUgU
52018https://www.youtube.com/watch?v=6aFdEhEZQjE
62017https://www.youtube.com/watch?v=vI4LHl4yFuo
72016https://www.youtube.com/watch?v=KIViy7L_lo8
82015https://www.youtube.com/watch?v=q7o7R5BgWDY
92014https://www.youtube.com/watch?v=DVwHCGAr_OE
102013https://www.youtube.com/watch?v=Lv-sY_z8MNs
112012https://www.youtube.com/watch?v=xY_MUB8adEQ
122011https://www.youtube.com/watch?v=SAIEamakLoY
132010https://www.youtube.com/watch?v=F0QXB5pw2qE
\n", + "
" + ], + "text/plain": [ + " year yt_link\n", + "0 2023 https://www.youtube.com/watch?v=3KtWfp0UopM\n", + "1 2022 https://www.youtube.com/watch?v=4WXs3sKu41I\n", + "2 2021 https://www.youtube.com/watch?v=EqboAI-Vk-U\n", + "3 2020 https://www.youtube.com/watch?v=rokGy0huYEA\n", + "4 2019 https://www.youtube.com/watch?v=ZRCdORJiUgU\n", + "5 2018 https://www.youtube.com/watch?v=6aFdEhEZQjE\n", + "6 2017 https://www.youtube.com/watch?v=vI4LHl4yFuo\n", + "7 2016 https://www.youtube.com/watch?v=KIViy7L_lo8\n", + "8 2015 https://www.youtube.com/watch?v=q7o7R5BgWDY\n", + "9 2014 https://www.youtube.com/watch?v=DVwHCGAr_OE\n", + "10 2013 https://www.youtube.com/watch?v=Lv-sY_z8MNs\n", + "11 2012 https://www.youtube.com/watch?v=xY_MUB8adEQ\n", + "12 2011 https://www.youtube.com/watch?v=SAIEamakLoY\n", + "13 2010 https://www.youtube.com/watch?v=F0QXB5pw2qE" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Read in table of Year in Search video links from public CSV file\n", + "GOOGLE_YEAR_IN_SEARCH_VIDEO_LINKS_CSV_GCS_URI = (\n", + " \"gs://github-repo/video/google_year_in_search_video_links.csv\"\n", + ")\n", + "\n", + "year_in_search_yt_links = pd.read_csv(GOOGLE_YEAR_IN_SEARCH_VIDEO_LINKS_CSV_GCS_URI)\n", + "\n", + "year_in_search_yt_links" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "145522e33a47" + }, + "source": [ + "### Set up for analyzing multiple video files\n", + "\n", + "Let’s say we are a sports agency who wants to see which athletes or teams appear most often in these videos as a measure of cultural relevance. Instead of watching and manually counting, we can use Gemini’s multimodal capabilities and world knowledge to extract each appearance of an athlete or team into a structured output that we can use for further analysis.\n", + "\n", + "The system instructions, prompt, and response schema that will apply to all 14 videos are each created in the cell below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "b8589a51547d" + }, + "outputs": [], + "source": [ + "# Set up pieces (prompt, response schema, config) for Google Year in Search videos\n", + "multiple_video_extraction_system_instruction_text = (\n", + " \"You are a video analyst that \"\n", + " \"carefully looks through all frames of provided videos, extracting out the \"\n", + " \"pieces necessary to respond to user prompts.\"\n", + ")\n", + "\n", + "multiple_video_extraction_prompt = (\n", + " \"Which sports athletes or teams are mentioned or \"\n", + " \"shown in this video? Please look through each frame carefully, and respond \"\n", + " \"with a complete list that includes the athlete or team's name (1 row per \"\n", + " \"athlete or team), whether they are an athlete or team, the sport they play, \"\n", + " \"and the timestamp into the video at which they appear (in mm:ss format, \"\n", + " \"do not give extra precision) for each one.\"\n", + ")\n", + "\n", + "multiple_video_extraction_response_schema = {\n", + " \"type\": \"ARRAY\",\n", + " \"items\": {\n", + " \"type\": \"OBJECT\",\n", + " \"properties\": {\n", + " \"name\": {\"type\": \"STRING\"},\n", + " \"athlete_or_team\": {\"type\": \"STRING\", \"enum\": [\"athlete\", \"team\"]},\n", + " \"sport\": {\"type\": \"STRING\"},\n", + " \"video_timestamp\": {\"type\": \"STRING\"},\n", + " },\n", + " },\n", + "}\n", + "\n", + "multiple_video_extraction_json_generation_config = GenerationConfig(\n", + " temperature=0.0,\n", + " max_output_tokens=8192,\n", + " response_mime_type=\"application/json\",\n", + " response_schema=multiple_video_extraction_response_schema,\n", + ")\n", + "\n", + "multiple_video_extraction_model = GenerativeModel(\n", + " model_name=GEMINI_PRO_MODEL_ID,\n", + " system_instruction=multiple_video_extraction_system_instruction_text,\n", + " generation_config=multiple_video_extraction_json_generation_config,\n", + ")" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "id": "0cb2d4688f68" + }, + "source": [ + "Next, we'll set up to run each of these prompt/video pairs through the Gemini API _asynchronously_. This allows us to send all the requests to Gemini at once, then wait for all the answers to come back - a more efficient process than sending them synchronously (one-by-one). See more details in [this Google Cloud Community Medium blog post](https://medium.com/google-cloud/how-to-prompt-gemini-asynchronously-using-python-on-google-cloud-986ca45d9f1b).\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "5aa93ca907bc" + }, + "outputs": [], + "source": [ + "# Function for asynchronous generation\n", + "\n", + "\n", + "@retry(wait=wait_random_exponential(multiplier=1, max=120), stop=stop_after_attempt(2))\n", + "async def async_generate(prompt, yt_link):\n", + " try:\n", + " response = await multiple_video_extraction_model.generate_content_async(\n", + " [prompt, Part.from_uri(mime_type=\"video/webm\", uri=yt_link)], stream=False\n", + " )\n", + "\n", + " response_dict = response.to_dict()\n", + "\n", + " return response_dict\n", + " except Exception as e:\n", + " print(\"Something failed, retrying\")\n", + " print(e)\n", + " with retry.stop_after_attempt(2) as retry_state:\n", + " if retry_state.attempt > 2:\n", + " return None\n", + " raise # Re-raise the exception for tenacity to handle" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61265bdff388" + }, + "source": [ + "### Run asynchronous Gemini calls to do video extraction" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "4191dc30d77a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Elapsed time: 63.80 seconds\n" + ] + } + ], + "source": [ + "# Perform asynhronous calls across all videos, gather responses\n", + "start_time = time.time()\n", + "\n", + "get_responses = [\n", + " 
async_generate(multiple_video_extraction_prompt, yt_link)\n", + " for yt_link in year_in_search_yt_links[\"yt_link\"]\n", + "]\n", + "\n", + "multiple_video_extraction_responses = await asyncio.gather(*get_responses)\n", + "\n", + "end_time = time.time()\n", + "\n", + "elapsed_time = end_time - start_time\n", + "\n", + "print(f\"Elapsed time: {elapsed_time:.2f} seconds\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7c69057ae51d" + }, + "source": [ + "### Extract and analyze video results across years\n", + "\n", + "Once we have the results from Gemini, we can process them and get table of every athlete or team apperance across all 14 \"Year in Search\" videos." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "id": "6e424adf2cf8" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
yearnameathlete_or_teamsportvideo_timestamp
\n", + "\n", + "
\n", + "Loading ITables v2.2.1 from the internet...\n", + "(need help?)
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Add structured outputs by year back to original table, show full extraction results\n", + "year_in_search_responses = year_in_search_yt_links.copy()\n", + "\n", + "year_in_search_responses[\"gemini_response\"] = [\n", + " json.dumps(response) for response in multiple_video_extraction_responses\n", + "]\n", + "\n", + "\n", + "def extract_result_df_from_gemini_response(year, gemini_response):\n", + " extract_response_text = json.loads(gemini_response)[\"candidates\"][0][\"content\"][\n", + " \"parts\"\n", + " ][0][\"text\"]\n", + "\n", + " extract_result_df = pd.DataFrame(json.loads(extract_response_text))\n", + "\n", + " extract_result_df[\"year\"] = year\n", + "\n", + " return extract_result_df\n", + "\n", + "\n", + "year_in_search_responses[\"extract_result_df\"] = year_in_search_responses.apply(\n", + " lambda row: extract_result_df_from_gemini_response(\n", + " row[\"year\"], row[\"gemini_response\"]\n", + " ),\n", + " axis=1,\n", + ")\n", + "\n", + "all_year_in_search_extractions = pd.concat(\n", + " year_in_search_responses[\"extract_result_df\"].tolist(), ignore_index=True\n", + ")[[\"year\", \"name\", \"athlete_or_team\", \"sport\", \"video_timestamp\"]]\n", + "\n", + "show(all_year_in_search_extractions)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b17e9b0af4e4" + }, + "source": [ + "Finally, we can count the number of years in which each athlete or team appeared in these videos, and return results for those who appeared more than once." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "id": "c0cd6041bce7" + }, + "outputs": [ + { + "data": { + "text/markdown": [ + "Athletes/Teams Appearing in Multiple Year in Search Videos" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
nameathlete_or_teamsportnum_years
0LEBRON JAMESathletebasketball3
1NAOMI OSAKAathletetennis3
2SIMONE BILESathletegymnastics3
3COCO GAUFFathletetennis2
4NICOLAS MAHUTathletetennis2
5SERENA WILLIAMSathletetennis2
\n", + "
" + ], + "text/plain": [ + " name athlete_or_team sport num_years\n", + "0 LEBRON JAMES athlete basketball 3\n", + "1 NAOMI OSAKA athlete tennis 3\n", + "2 SIMONE BILES athlete gymnastics 3\n", + "3 COCO GAUFF athlete tennis 2\n", + "4 NICOLAS MAHUT athlete tennis 2\n", + "5 SERENA WILLIAMS athlete tennis 2" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Analyze results to show athletes/teams showing up most often in Year in Search videos\n", + "multiple_year_in_search_app = (\n", + " all_year_in_search_extractions.assign(\n", + " # Convert 'name' to uppercase to handle e.g. \"LeBron\" vs \"Lebron\"\n", + " name=all_year_in_search_extractions[\"name\"].str.upper(),\n", + " # Convert 'athlete_or_team' to lowercase for consistency\n", + " athlete_or_team=all_year_in_search_extractions[\"athlete_or_team\"].str.lower(),\n", + " )\n", + " .groupby([\"name\", \"athlete_or_team\"])\n", + " .apply(\n", + " lambda x: pd.Series(\n", + " {\n", + " # Aggregate 'sport' across type and name (handling different cases)\n", + " \"sport\": \", \".join(sorted(x[\"sport\"].str.lower().unique())),\n", + " # Count # of diff years in which each athlete/team appears in video\n", + " \"num_years\": x[\"year\"].nunique(),\n", + " }\n", + " ),\n", + " include_groups=False,\n", + " )\n", + " .reset_index()\n", + " .\n", + " # Filter to only those appearing multiple times\n", + " query(\"num_years >= 2\")\n", + " .sort_values([\"num_years\", \"name\"], ascending=[False, True])\n", + " .reset_index(drop=True)\n", + ")\n", + "\n", + "# Display results\n", + "display(Markdown(\"Athletes/Teams Appearing in Multiple Year in Search Videos\"))\n", + "display(multiple_year_in_search_app)" + ] + } + ], + "metadata": { + "colab": { + "name": "youtube_video_analysis.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 8de0c2b3de8c15f36b7af7a501df26efa9f184b2 
Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Tue, 24 Sep 2024 11:43:32 -0400 Subject: [PATCH 07/76] feat: Add examples for direct analysis of publicly available web media (#1160) # Description Add examples for direct analysis of publicly available web media: 1. Image 2. Video + Controlled generation --- .../getting-started/intro_gemini_python.ipynb | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/gemini/getting-started/intro_gemini_python.ipynb b/gemini/getting-started/intro_gemini_python.ipynb index a5ec1446e9..2007da32de 100644 --- a/gemini/getting-started/intro_gemini_python.ipynb +++ b/gemini/getting-started/intro_gemini_python.ipynb @@ -1137,6 +1137,95 @@ "\n", "print(response.text)" ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5cdf391f2067" + }, + "source": [ + "### Direct analysis of publicly available web media\n", + "\n", + "This new feature enables you to directly process publicly available URL resources including images, text, video and audio with Gemini. This feature supports all currently [supported modalities and file formats](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#blob).\n", + "\n", + "In this example, you add the file URL of a publicly available image file to the request to identify what's in the image." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d778a7e12b56" + }, + "outputs": [], + "source": [ + "prompt = \"\"\"\n", + "Extract the objects in the given image and output them in a list in alphabetical order.\n", + "\"\"\"\n", + "\n", + "image_file = Part.from_uri(\n", + " \"https://storage.googleapis.com/cloud-samples-data/generative-ai/image/office-desk.jpeg\",\n", + " \"image/jpeg\",\n", + ")\n", + "\n", + "response = model.generate_content([image_file, prompt])\n", + "\n", + "print(response.text)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "68186ce494bc" + }, + "source": [ + "This example demonstrates how to add the file URL of a publicly available video file to the request, and use the [controlled generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output) capability to constraint the model output to a structured format." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "75d61049f1cb" + }, + "outputs": [], + "source": [ + "response_schema = {\n", + " \"type\": \"ARRAY\",\n", + " \"items\": {\n", + " \"type\": \"OBJECT\",\n", + " \"properties\": {\n", + " \"timecode\": {\n", + " \"type\": \"STRING\",\n", + " },\n", + " \"chapter_summary\": {\n", + " \"type\": \"STRING\",\n", + " },\n", + " },\n", + " \"required\": [\"timecode\", \"chapter_summary\"],\n", + " },\n", + "}\n", + "\n", + "prompt = \"\"\"\n", + "Chapterize this video content by grouping the video content into chapters and providing a brief summary for each chapter. \n", + "Please only capture key events and highlights. If you are not sure about any info, please do not make it up. 
\n", + "\"\"\"\n", + "\n", + "video_file = Part.from_uri(\n", + " \"https://storage.googleapis.com/cloud-samples-data/generative-ai/video/rio_de_janeiro_beyond_the_map_rio.mp4\",\n", + " \"video/mp4\",\n", + ")\n", + "\n", + "response = model.generate_content(\n", + " contents=[video_file, prompt],\n", + " generation_config=GenerationConfig(\n", + " response_mime_type=\"application/json\",\n", + " response_schema=response_schema,\n", + " ),\n", + ")\n", + "\n", + "print(response.text)" + ] } ], "metadata": { From 77a756e06905db67ead64f72f65b390148978652 Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Tue, 24 Sep 2024 23:46:29 +0800 Subject: [PATCH 08/76] Added code for dataset validation and removed outdated notebooks from tuning folder. (#1157) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ x] You are listed as the author in your notebook or README file. - [x ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [ x] Make your Pull Request title in the specification. - [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --------- Co-authored-by: Owl Bot Co-authored-by: Eric Dong --- ...vised_finetuning_text_classification.ipynb | 2884 ----------------- .../tuning/gemini_supervised_tuning_qa.ipynb | 920 ------ ...ning_token_count_and_cost_estimation.ipynb | 219 +- 3 files changed, 205 insertions(+), 3818 deletions(-) delete mode 100644 gemini/tuning/gemini_supervised_finetuning_text_classification.ipynb delete mode 100644 gemini/tuning/gemini_supervised_tuning_qa.ipynb diff --git a/gemini/tuning/gemini_supervised_finetuning_text_classification.ipynb b/gemini/tuning/gemini_supervised_finetuning_text_classification.ipynb deleted file mode 100644 index 8f6bbe39a1..0000000000 --- a/gemini/tuning/gemini_supervised_finetuning_text_classification.ipynb +++ /dev/null @@ -1,2884 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ur8xi4C7S06n" - }, - "outputs": [], - "source": [ - "# Copyright 2024 Google LLC\n", - "#\n", - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JAPoU8Sm5E6e" - }, - "source": [ - "# Supervised Fine-tuning for Text Classification with Gemini\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \"Google
Open in Colab\n", - "
\n", - "
\n", - " \n", - " \"Google
Open in Colab Enterprise\n", - "
\n", - "
\n", - " \n", - " \"Vertex
Open in Workbench\n", - "
\n", - "
\n", - " \n", - " \"GitHub
View on GitHub\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "iy3a0zd7phg8" - }, - "source": [ - "| | | |\n", - "|-|-|-|\n", - "|Author(s) | [Gabriela Hernandez Larios](https://github.com/gabrielahrlr) | [Elia Secchi](https://github.com/eliasecchig)|\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "tvgnzT1CKxrO" - }, - "source": [ - "## Overview\n", - "\n", - "This tutorial demonstrates how to perform text classification with Gemini models. From in-context learning (using zero-shot and few-shot learning) to in-weights learning fine-tuning Gemini models for text classification.\n", - "\n", - "### Objective\n", - " We'll cover the development cycle from preparing the dataset, to setting up an evaluation framework to perform text classification tasks using Gemini. Additionally, you'll learn how to create and log experiments, adapting Gemini models to the text classification task with in-context and in-weights (fine-tuning) learning approaches, and compare the performances.\n", - "\n", - "This tutorial uses the following Google Cloud ML Services and Resources:\n", - "\n", - "- Google Cloud Storage\n", - "- Vertex AI Experiments\n", - "- Vertex AI Fine-Tuning\n", - "- Gemini 1.0 Pro\n", - "\n", - "The steps performed include:\n", - "- [Load and split dataset](#scrollTo=EdvJRUWRNGHE&line=1&uniqifier=1)\n", - "- [Evaluation and Experiment Setup](#scrollTo=c2YOsromfcuB&line=6&uniqifier=1)\n", - "- [In-Context learning (zero-shot and few-shot) using Gemini Models](#scrollTo=EfKnRU-SfcuB)\n", - "- [Fine-tuning Gemini 1.0 Pro for text classification](#scrollTo=Qs9eHiL5fcuD)\n", - "- [Comparative Evaluation]()\n", - "- [[Optional] Heuristics for computing Confidence Scores](#scrollTo=KW7wPWQWuQT4)\n", - "\n", - "### Dataset\n", - "The [BBC News dataset](http://mlg.ucd.ie/datasets/bbc.html) consists of 2225 articles from the BBC news website corresponding to five topical areas: business, entertainment, politics, sport, and tech. 
This dataset was downloaded from http://mlg.ucd.ie/datasets/bbc.html\n", - "\n", - "**Dataset Citation**\n", - "\n", - "```\n", - "@inproceedings{greene06icml,\n", - "\tAuthor = {Derek Greene and P\\'{a}draig Cunningham},\n", - "\tBooktitle = {Proc. 23rd International Conference on Machine learning (ICML'06)},\n", - "\tPages = {377--384},\n", - "\tPublisher = {ACM Press},\n", - "\tTitle = {Practical Solutions to the Problem of Diagonal Dominance in Kernel Document Clustering},\n", - "\tYear = {2006}}\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "61RBz8LLbxCR" - }, - "source": [ - "## Installation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "No17Cw5hgx12" - }, - "source": [ - "### Install Vertex AI SDK for Python and other required packages\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "tFy3H3aPgx12" - }, - "outputs": [], - "source": [ - "%pip install --upgrade --user --quiet google-cloud-aiplatform datasets backoff multiprocess gcsfs" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "R5Xep4W9lq-Z" - }, - "source": [ - "### Restart runtime (Colab only)\n", - "\n", - "To use the newly installed packages, you must restart the runtime on Google Colab." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XRvKdaPDTznN" - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "if \"google.colab\" in sys.modules:\n", - " import IPython\n", - "\n", - " app = IPython.Application.instance()\n", - " app.kernel.do_shutdown(True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SbmM4z7FOBpM" - }, - "source": [ - "
\n", - "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", - "
\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "dmWOrTJ3gx13" - }, - "source": [ - "### Authenticate your notebook environment (Colab only)\n", - "\n", - "Authenticate your environment on Google Colab.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NyKGtVQjgx13" - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "if \"google.colab\" in sys.modules:\n", - " from google.colab import auth\n", - "\n", - " auth.authenticate_user()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ILS7NNudfct_" - }, - "source": [ - "### Import libraries\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "XjsxAjgJfct_" - }, - "outputs": [], - "source": [ - "from collections import Counter\n", - "from collections.abc import Callable\n", - "from concurrent.futures import ThreadPoolExecutor\n", - "from functools import partial\n", - "import json\n", - "import traceback\n", - "from typing import Any\n", - "\n", - "import backoff\n", - "\n", - "# Data Handling and Processing\n", - "from datasets import load_dataset\n", - "import gcsfs\n", - "\n", - "# Google Cloud Libraries\n", - "from google.api_core.exceptions import ResourceExhausted\n", - "from google.cloud import aiplatform, storage\n", - "\n", - "# Multiprocessing\n", - "import multiprocess as mp\n", - "import pandas as pd\n", - "from sklearn.metrics import (\n", - " classification_report,\n", - " f1_score,\n", - " precision_score,\n", - " recall_score,\n", - ")\n", - "from sklearn.model_selection import train_test_split\n", - "from tqdm import tqdm\n", - "import vertexai\n", - "from vertexai.generative_models import (\n", - " GenerationConfig,\n", - " GenerativeModel,\n", - " HarmBlockThreshold,\n", - " HarmCategory,\n", - ")\n", - "from vertexai.preview.tuning import sft" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DF4l8DTdWgPY" - }, - "source": [ - "### Set Google Cloud project 
information, initialize Vertex AI SDK for Python and create a GCS bucket\n", - "\n", - "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com). Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Nqwi-5ufWp_B" - }, - "outputs": [], - "source": [ - "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", - "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", - "\n", - "vertexai.init(project=PROJECT_ID, location=LOCATION)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "C5r1Bhf7fcuA" - }, - "outputs": [], - "source": [ - "BUCKET_NAME = \"[your-bucket-name]\" # @param {type:\"string\"}\n", - "BUCKET_URI = f\"gs://{BUCKET_NAME}\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "eSZS8QfOfcuA" - }, - "source": [ - "**warning:** Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "QXxi2tC2fcuA" - }, - "outputs": [], - "source": [ - "!gsutil mb -l $LOCATION -p $PROJECT_ID $BUCKET_URI" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pSZ8-hwafcuA" - }, - "source": [ - "Finally, validate access to your Cloud Storage bucket by examining its contents" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "XAEAlYH2fcuA" - }, - "source": [ - "### Helper Functions" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "f6sph8etfcuA" - }, - "source": [ - "#### Batch Prediction - Helper functions\n", - "\n", - "These helper functions streamline batch predictions using parallelization and multithreading with online Gemini Models. 
Gemini also offers the possibility to [perform batch text generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini) in **Public Preview** (July 2024). " - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": { - "id": "Fz4tGANnfcuA" - }, - "outputs": [], - "source": [ - "def backoff_hdlr(details) -> None:\n", - " \"\"\"\n", - " Handles backoff events.\n", - "\n", - " Args:\n", - " details: A dictionary containing information about the backoff event.\n", - " \"\"\"\n", - " print(f\"Backing off {details['wait']:.1f} seconds after {details['tries']} tries\")\n", - "\n", - "\n", - "def log_error(msg: str, *args: Any) -> None:\n", - " \"\"\"\n", - " Logs an error message and raises an exception.\n", - "\n", - " Args:\n", - " msg: The error message.\n", - " *args: Additional arguments to be passed to the logger.\n", - " \"\"\"\n", - " mp.get_logger().error(msg, *args)\n", - " raise Exception(msg)\n", - "\n", - "\n", - "def handle_exception_threading(f: Callable) -> Callable:\n", - " \"\"\"\n", - " A decorator that handles exceptions in a threaded environment.\n", - "\n", - " Args:\n", - " f: The function to decorate.\n", - "\n", - " Returns:\n", - " The decorated function.\n", - " \"\"\"\n", - "\n", - " def applicator(*args: Any, **kwargs: Any) -> Any:\n", - " try:\n", - " return f(*args, **kwargs)\n", - " except:\n", - " log_error(traceback.format_exc())\n", - "\n", - " return applicator\n", - "\n", - "\n", - "@handle_exception_threading\n", - "@backoff.on_exception(\n", - " backoff.expo, ResourceExhausted, max_tries=30, on_backoff=backoff_hdlr\n", - ")\n", - "def _predict_message(message: str, model: GenerativeModel) -> str | None:\n", - " \"\"\"\n", - " Predict messages\n", - "\n", - " Args:\n", - " message: The message to predict.\n", - " model: The GenerativeModel to use for prediction.\n", - "\n", - " Returns:\n", - " The predicted message, or None if an error occurred.\n", - " \"\"\"\n", - " response 
= model.generate_content([message], stream=False)\n", - " return response.text\n", - "\n", - "\n", - "def batch_predict(\n", - " messages: list[str], model: GenerativeModel, max_workers: int = 4\n", - ") -> list[str | None]:\n", - " \"\"\"\n", - " Predicts the classes for a list of messages\n", - "\n", - " Args:\n", - " - messages: list of all messages to predict\n", - " - model: model to use for predicting.\n", - " - max_workers: number of workers to use for parallel predictions\n", - "\n", - " Returns:\n", - " - list of predicted labels\n", - "\n", - " \"\"\"\n", - " predictions = list()\n", - " with ThreadPoolExecutor(max_workers) as pool:\n", - " partial_func = partial(_predict_message, model=model)\n", - " for message in tqdm(pool.map(partial_func, messages), total=len(messages)):\n", - " predictions.append(message)\n", - "\n", - " return predictions" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0tKVjsJKfcuA" - }, - "source": [ - "#### Vertex AI Experiment Helper\n", - "We will define a `VertexAIExperimentManager` class to simplify the creation, logging and runs management of experiments using Vertex AI Experiments." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "f1696a56739c" - }, - "outputs": [], - "source": [ - "class VertexAIExperimentManager:\n", - " \"\"\"\n", - " A class for managing experiments and runs in Vertex AI.\n", - " This class encapsulates the functionality for creating experiments, logging runs,\n", - " and retrieving experiment data in Vertex AI.\n", - " \"\"\"\n", - "\n", - " def __init__(self, project: str, location: str):\n", - " self.project = project\n", - " self.location = location\n", - " self.current_experiment = None\n", - "\n", - " def init_experiment(\n", - " self, experiment_name: str, experiment_description: str | None = None\n", - " ):\n", - " \"\"\"Initialize or switch to a specific experiment.\"\"\"\n", - " self.current_experiment = experiment_name\n", - " aiplatform.init(\n", - " experiment=experiment_name,\n", - " experiment_description=experiment_description,\n", - " experiment_tensorboard=False,\n", - " project=self.project,\n", - " location=self.location,\n", - " )\n", - "\n", - " def create_experiment(\n", - " self, experiment_name: str, experiment_description: str | None = None\n", - " ) -> None:\n", - " \"\"\"Create an Experiment on Vertex AI Experiments\"\"\"\n", - " self.init_experiment(experiment_name, experiment_description)\n", - "\n", - " def log_run(\n", - " self, run_name: str, params: dict[str, Any], metrics: dict[str, Any]\n", - " ) -> None:\n", - " \"\"\"Log experiment run data to Vertex AI Experiments.\"\"\"\n", - " if not self.current_experiment:\n", - " raise ValueError(\"No experiment initialized. 
Call init_experiment first.\")\n", - "\n", - " aiplatform.start_run(run=run_name)\n", - " aiplatform.log_params(params)\n", - " aiplatform.log_metrics(metrics)\n", - " aiplatform.end_run()\n", - "\n", - " def get_experiments_data_frame(self) -> pd.DataFrame | None:\n", - " \"\"\"Retrieve a DataFrame of experiment data from Vertex AI Experiments.\"\"\"\n", - " if not self.current_experiment:\n", - " raise ValueError(\"No experiment initialized. Call init_experiment first.\")\n", - "\n", - " return aiplatform.get_experiment_df()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b7d25db3a772" - }, - "source": [ - "#### Helper Functions for Data Transformation \n", - "This section contains helper functions designated to transform data from different formats into the specific format required for fine-tuning Gemini models on Vertex AI. These functions handle:\n", - "\n", - "- Pandas DataFrames\n", - "- CSV files previously used for training AutoML text classifiers\n", - "- JSONL files previously used for training AutoML text classifiers\n", - "\n", - "It also includes a function to validate the transformed dataset, ensuring it adheres to the correct format and roles for Gemini fine-tuning on Vertex AI." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3f593ae8d3f3" - }, - "outputs": [], - "source": [ - "def create_gemini_messages(\n", - " text: str, label: str, system_prompt: str | None = None\n", - ") -> dict:\n", - " messages = []\n", - " if system_prompt:\n", - " messages.append({\"role\": \"system\", \"content\": system_prompt})\n", - " messages.extend(\n", - " [\n", - " {\"role\": \"user\", \"content\": text},\n", - " {\"role\": \"model\", \"content\": label},\n", - " ]\n", - " )\n", - " return {\"messages\": messages}\n", - "\n", - "\n", - "def prepare_tuning_dataset_from_df(\n", - " tuning_df: pd.DataFrame, system_prompt: str | None = None\n", - ") -> pd.DataFrame:\n", - " \"\"\"\n", - " Prepares a tuning dataset from a pandas DataFrame for Gemini fine-tuning.\n", - " Args:\n", - " tuning_df: A pandas DataFrame with columns \"text\" and \"label_text\".\n", - " system_prompt: An optional system prompt for zero-shot learning.\n", - " Returns:\n", - " A pandas DataFrame containing the data in the Gemini tuning format.\n", - " \"\"\"\n", - " tuning_dataset = [\n", - " create_gemini_messages(row[\"text\"], row[\"label_text\"], system_prompt)\n", - " for _, row in tuning_df.iterrows()\n", - " ]\n", - " return pd.DataFrame(tuning_dataset)\n", - "\n", - "\n", - "def convert_tuning_dataset_from_automl_csv(\n", - " automl_gcs_csv_path: str,\n", - " system_prompt: str | None = None,\n", - " partition: str = \"training\",\n", - ") -> pd.DataFrame:\n", - " \"\"\"\n", - " Converts an AutoML CSV dataset for text classification to the Gemini tuning format.\n", - " Args:\n", - " automl_gcs_csv_path: The GCS path to the AutoML CSV dataset.\n", - " system_prompt: The instructions to the model.\n", - " partition: The partition to extract from the dataset (e.g., \"training\", \"validation\", \"test\"). 
Defaults to \"training\".\n", - " Returns:\n", - " A pandas DataFrame containing the data in the Gemini tuning format.\n", - " \"\"\"\n", - " df = pd.read_csv(automl_gcs_csv_path, names=[\"partition\", \"text\", \"label\"])\n", - " df_automl = df.loc[df[\"partition\"] == partition]\n", - " gemini_dataset = [\n", - " create_gemini_messages(row[\"text\"], row[\"label\"], system_prompt)\n", - " for _, row in df_automl.iterrows()\n", - " ]\n", - " return pd.DataFrame(gemini_dataset)\n", - "\n", - "\n", - "def convert_tuning_dataset_from_automl_jsonl(\n", - " project_id: str,\n", - " automl_gcs_jsonl_path: str,\n", - " system_prompt: str | None = None,\n", - " partition: str = \"training\",\n", - ") -> pd.DataFrame:\n", - " \"\"\"\n", - " Converts an AutoML JSONL dataset for text classification to the Gemini tuning format.\n", - " Args:\n", - " automl_gcs_jsonl_path: The GCS path to the AutoML JSONL dataset for text classification.\n", - " system_prompt: The instructions to the model.\n", - " partition: The partition to extract from the dataset (e.g., \"training\", \"validation\", \"test\"). 
Defaults to \"training\".\n", - " Returns:\n", - " A pandas DataFrame containing the data in the Gemini tuning format.\n", - " \"\"\"\n", - " processed_data = []\n", - " gcs_file_system = gcsfs.GCSFileSystem(project=project_id)\n", - " with gcs_file_system.open(automl_gcs_jsonl_path) as f:\n", - " for line in f:\n", - " data = json.loads(line)\n", - " processed_data.append(\n", - " {\n", - " \"label\": data[\"classificationAnnotation\"][\"displayName\"],\n", - " \"text\": data[\"textContent\"],\n", - " \"partition\": data[\"dataItemResourceLabels\"][\n", - " \"aiplatform.googleapis.com/ml_use\"\n", - " ],\n", - " }\n", - " )\n", - "\n", - " df = pd.DataFrame(processed_data)\n", - " df_automl = df.loc[df[\"partition\"] == partition]\n", - " gemini_dataset = [\n", - " create_gemini_messages(row[\"text\"], row[\"label\"], system_prompt)\n", - " for _, row in df_automl.iterrows()\n", - " ]\n", - " return pd.DataFrame(gemini_dataset)\n", - "\n", - "\n", - "def validate_gemini_tuning_jsonl(gcs_jsonl_path: str) -> list[dict]:\n", - " \"\"\"\n", - " Validates a JSONL file on Google Cloud Storage against the Gemini tuning format.\n", - "\n", - " Args:\n", - " gcs_jsonl_path: The GCS path to the JSONL file.\n", - "\n", - " Returns:\n", - " A list of dictionaries representing the errors found in the file.\n", - " Each dictionary has the following structure:\n", - " {\n", - " \"error_type\": \"Error description\",\n", - " \"row_index\": The index of the row where the error occurred,\n", - " \"message\": The error message\n", - " }\n", - " \"\"\"\n", - "\n", - " errors = []\n", - " storage_client = storage.Client()\n", - " blob = storage.Blob.from_string(uri=gcs_jsonl_path, client=storage_client)\n", - "\n", - " with blob.open(\"r\") as f:\n", - " for row_index, line in enumerate(f):\n", - " try:\n", - " data = json.loads(line)\n", - " # Check for the presence of the \"messages\" key\n", - " if \"messages\" not in data:\n", - " errors.append(\n", - " {\n", - " \"error_type\": 
\"Missing 'messages' key\",\n", - " \"row_index\": row_index,\n", - " \"message\": f\"Row {row_index} is missing the 'messages' key.\",\n", - " }\n", - " )\n", - " continue\n", - "\n", - " messages = data[\"messages\"]\n", - " # Check if \"messages\" is a list\n", - " if not isinstance(messages, list):\n", - " errors.append(\n", - " {\n", - " \"error_type\": \"Invalid 'messages' type\",\n", - " \"row_index\": row_index,\n", - " \"message\": f\"Row {row_index}: 'messages' is not a list.\",\n", - " }\n", - " )\n", - " continue\n", - "\n", - " # Validate each message in the \"messages\" list\n", - " for message_index, message in enumerate(messages):\n", - " if not isinstance(message, dict):\n", - " errors.append(\n", - " {\n", - " \"error_type\": \"Invalid message format\",\n", - " \"row_index\": row_index,\n", - " \"message\": f\"\"\"Row {row_index},\n", - " message {message_index}: Message is not a dictionary.\"\"\",\n", - " }\n", - " )\n", - " continue\n", - "\n", - " # Check for required keys in each message dictionary\n", - " if \"role\" not in message or \"content\" not in message:\n", - " errors.append(\n", - " {\n", - " \"error_type\": \"Missing 'role' or 'content' key\",\n", - " \"row_index\": row_index,\n", - " \"message\": f\"Row {row_index}, message {message_index}: \"\n", - " \"Missing 'role' or 'content' key.\",\n", - " }\n", - " )\n", - " continue\n", - "\n", - " # Check for valid role values\n", - " if message[\"role\"] not in [\"system\", \"user\", \"model\"]:\n", - " errors.append(\n", - " {\n", - " \"error_type\": \"Invalid 'role' value\",\n", - " \"row_index\": row_index,\n", - " \"message\": f\"\"\"Row {row_index}, message {message_index}:\n", - " Invalid 'role' value. 
Expected 'system', 'user', or 'model'.\"\"\",\n", - " }\n", - " )\n", - " continue\n", - "\n", - " except json.JSONDecodeError as e:\n", - " errors.append(\n", - " {\n", - " \"error_type\": \"JSON Decode Error\",\n", - " \"row_index\": row_index,\n", - " \"message\": f\"Row {row_index}: JSON decoding error: {e}\",\n", - " }\n", - " )\n", - "\n", - " return errors" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "EdvJRUWRNGHE" - }, - "source": [ - "## 1. Load and Splitting Dataset\n", - "In this step, we will load the raw data and create training, validation and test sets. Later these datasets will be used to perform different types of adaptations to Gemini models for the task under consideration.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2f475c9eceb9" - }, - "source": [ - "Load the dataset from Hugging Face" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "k70Yb_XFfcuA" - }, - "outputs": [], - "source": [ - "datasets = load_dataset(\"SetFit/bbc-news\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "129ed33e0795" - }, - "source": [ - "Store in Pandas Dataframes the train and test partitions." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rAi4NI2MfcuA" - }, - "outputs": [], - "source": [ - "train = pd.DataFrame(datasets[\"train\"])\n", - "test = pd.DataFrame(datasets[\"test\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "485a5aadee60" - }, - "source": [ - "We now take a quick look to the data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "OcTrjjGTfcuA" - }, - "outputs": [], - "source": [ - "train.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "656325ef857f" - }, - "source": [ - "We want to check the distribution of the label values" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "SSfs2R-dfcuB" - }, - "outputs": [], - "source": [ - "train.label_text.value_counts()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ZmAsTaDIfcuB" - }, - "outputs": [], - "source": [ - "print(train.shape)\n", - "print(test.shape)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "db7764b1824f" - }, - "source": [ - "We are going to partition the test data into validation and test datasets, in order to have three datasets, namely train, val (validation) and test datasets. To perform evaluations.\n", - "\n", - "Test size will be slightly larger than validation, as while fine-tuning Gemini the validation dataset can only be max 256 rows." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "kRYqeB-kfcuB" - }, - "outputs": [], - "source": [ - "val, test = train_test_split(\n", - " test, test_size=0.75, shuffle=True, stratify=test[\"label_text\"], random_state=2\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "sRtiloYgfcuB" - }, - "outputs": [], - "source": [ - "print(val.shape)\n", - "print(test.shape)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3932269eaf0e" - }, - "source": [ - "Verify that the values of the label column are following a similar distribution, in order to have comparable evaluations." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ySB5n2ZwfcuB" - }, - "outputs": [], - "source": [ - "val.label_text.value_counts()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ec1u71uzfcuB" - }, - "outputs": [], - "source": [ - "test.label_text.value_counts()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c2YOsromfcuB" - }, - "source": [ - "## 2. Evaluation and Experiment Setup\n", - "We will create the required functions to evaluate our experiments and to log them in Vertex Experiments.\n", - "\n", - "\n", - "### Evaluation Setup\n", - "For this text classification task, we will use the below classification metrics to evaluate the performance of the models and it different adaptations. 
We will track the below metrics in our development.\n", - "\n", - "- Overall Micro-F1\n", - "- Overall Macro-F1\n", - "- Overall Accuracy\n", - "- Overall Weighted Precision\n", - "- Overall Weighted Recall\n", - "- F1-Score (overall and per class)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "68819c8f056f" - }, - "source": [ - "The below functions will allow us to post-process the predictions and compute the evaluation metrics." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "jWbdf4gmfcuB" - }, - "outputs": [], - "source": [ - "def predictions_postprocessing(text: str) -> str:\n", - " \"\"\"\n", - " Cleans the predicted class label string.\n", - "\n", - " Args:\n", - " text (str): The predicted class label string.\n", - "\n", - " Returns:\n", - " str: The cleaned class label string.\n", - " \"\"\"\n", - " return text.strip().lower()\n", - "\n", - "\n", - "def evaluate_predictions(\n", - " df: pd.DataFrame,\n", - " target_column: str = \"label_text\",\n", - " predictions_column: str = \"predicted_labels\",\n", - " postprocessing: bool = True,\n", - ") -> dict[str, float]:\n", - " \"\"\"\n", - " Batch evaluation of predictions, returns a dictionary with the metric.\n", - "\n", - " Args:\n", - " - df (pandas.DataFrame): a pandas dataframe with two mandatory columns, a target column with\n", - " the actual true values, and a predictions column with the predicted values.\n", - " - target_column (str): column name with the actual ground truth values\n", - " - predictions_column (str): column name with the model predictions\n", - " - postprocessing (bool): whether to apply postprocessing to predictions.\n", - "\n", - " Returns:\n", - " Dict[str, float]: Dictionary of evaluation metrics.\n", - " \"\"\"\n", - " if postprocessing:\n", - " df[predictions_column] = df[predictions_column].apply(\n", - " predictions_postprocessing\n", - " )\n", - "\n", - " y_true = df[target_column]\n", - " y_pred = df[predictions_column]\n", - "\n", - " metrics_report = classification_report(y_true, 
y_pred, output_dict=True)\n", - " overall_macro_f1_score = f1_score(y_true, y_pred, average=\"macro\")\n", - " overall_micro_f1_score = f1_score(y_true, y_pred, average=\"micro\")\n", - " weighted_precision = precision_score(y_true, y_pred, average=\"weighted\")\n", - " weighted_recall = recall_score(y_true, y_pred, average=\"weighted\")\n", - "\n", - " metrics = {\n", - " \"accuracy\": metrics_report[\"accuracy\"],\n", - " \"weighted precision\": weighted_precision,\n", - " \"weighted recall\": weighted_recall,\n", - " \"macro f1\": overall_macro_f1_score,\n", - " \"micro f1\": overall_micro_f1_score,\n", - " }\n", - "\n", - " categories = [\"business\", \"sport\", \"politics\", \"tech\", \"entertainment\"]\n", - " for category in categories:\n", - " if category in metrics_report:\n", - " metrics[f\"{category}_f1_score\"] = metrics_report[category][\"f1-score\"]\n", - "\n", - " return metrics" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6g3sHozyfcuB" - }, - "source": [ - "### Experiment Setup\n", - "Before starting the development and experimentation process, we will setup Vertex AI Experiments, in order to log all the experiments we run and compare them using our defined metrics. 
\n", - "\n", - "In this part we will use some of the helper functions we defined in the [helper functions section](#scrollTo=0tKVjsJKfcuA), to create an experiment where we will log all our different runs.\n", - "\n", - "For more information about Vertex Experiments, please refer to its [documentation](https://cloud.google.com/vertex-ai/docs/experiments/intro-vertex-ai-experiments)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "J9wT-nkQOeik" - }, - "outputs": [], - "source": [ - "EXPERIMENT_NAME = \"[your-experiment]\" # @param {type:\"string\"}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "vYWhf5YTfcuB" - }, - "outputs": [], - "source": [ - "experiment_manager = VertexAIExperimentManager(project=PROJECT_ID, location=LOCATION)\n", - "experiment_manager.create_experiment(\n", - " experiment_name=EXPERIMENT_NAME,\n", - " experiment_description=\"Fine-tuning Gemini 1.0 Pro for text classification\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "e5f7171b3f93" - }, - "source": [ - "We will create an evaluation DataFrame from our Test dataset, where we will store the predictions from all the experiments." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "2nQymMebwJ-Y" - }, - "outputs": [], - "source": [ - "# Create an Evaluation dataframe to store the predictions from all the experiments.\n", - "df_evals = test.copy()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "EfKnRU-SfcuB" - }, - "source": [ - "## 3. In-Context Adaptation using Gemini models\n", - "\n", - "In this section we'll do in-context learning to instruct Gemini models to perform the text classification task under consideration, using zero-shot and few-shot prompt engineering techniques. 
\n", - "\n", - "The prompts presented in this section are crafted for this task, and in our experiments they demonstrate superior results compared to other simpler prompts.\n", - "\n", - "**Before fine-tuning a model, it is important to find the best prompt**: system instructions, examples, structure, etc., for the task under consideration. This will permit to get an understanding of which prompt works the best for the used model, and even boost more the performances when fine-tuning.\n", - "\n", - "In this Colab, we are using Gemini 1.0 Pro, in order to compare the performances of the frozen model and after fine-tuning. But you can reuse this code to test also Gemini 1.5 Pro and Gemini 1.5 Flash by changing the model name in the code.\n", - "\n", - "**Note:** Prompt Engineering is model-dependent. We recommend you to experiment with different prompting techniques per model. Techniques like Chain-of-Thought can increase performances, as well as Dynamic Few-Shots (using a RAG system to dynamically integrate the examples that are similar to the user input)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "62ec9a0238de" - }, - "source": [ - "### Prompts Definition\n", - "\n", - "We create the prompts we want to use for our experiments. In this case, we define two: zero-shot and few-shot prompts." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "FO5XWYDBfcuB" - }, - "outputs": [], - "source": [ - "system_prompt_zero_shot = \"\"\"TASK:\n", - "Classify the text into ONLY one of the following classes [business, entertainment, politics, sport, tech].\n", - "\n", - "CLASSES:\n", - "- business\n", - "- entertainment\n", - "- politics\n", - "- sport\n", - "- tech\n", - "\n", - "INSTRUCTIONS\n", - "- Respond with ONLY one class.\n", - "- You MUST use the exact word from the list above.\n", - "- DO NOT create or use any other classes.\n", - "- CAREFULLY analyze the text before choosing the best-fitting category from [business, entertainment, politics, sport, tech].\n", - "\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7ne1zh7hfcuB" - }, - "source": [ - "For the few-shot prompt, we'll randomly pick an example from each category using the `train` dataset we previously computed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "FApEzz62fcuB" - }, - "outputs": [], - "source": [ - "system_prompt_few_shot = f\"\"\"TASK:\n", - "Classify the text into ONLY one of the following classes [business, entertainment, politics, sport, tech].\n", - "\n", - "CLASSES:\n", - "- business\n", - "- entertainment\n", - "- politics\n", - "- sport\n", - "- tech\n", - "\n", - "INSTRUCTIONS:\n", - "- Respond with ONLY one class.\n", - "- You MUST use the exact word from the list above.\n", - "- DO NOT create or use any other classes.\n", - "- CAREFULLY analyze the text before choosing the best-fitting category from [business, entertainment, politics, sport, tech].\n", - "\n", - "EXAMPLES:\n", - "- EXAMPLE 1:\n", - " \n", - " {train.loc[train[\"label_text\"] == \"business\", \"text\"].iloc[10]}\n", - " \n", - " {train.loc[train[\"label_text\"] == \"business\", \"label_text\"].iloc[10]}\n", - "\n", - "- EXAMPLE 2:\n", - " \n", - " {train.loc[train[\"label_text\"] == \"entertainment\", 
\"text\"].iloc[10]}\n", - " \n", - " {train.loc[train[\"label_text\"] == \"entertainment\", \"label_text\"].iloc[10]}\n", - "\n", - "- EXAMPLE 3:\n", - " \n", - " {train.loc[train[\"label_text\"] == \"politics\", \"text\"].iloc[10]}\n", - " \n", - " {train.loc[train[\"label_text\"] == \"politics\", \"label_text\"].iloc[10]}\n", - "\n", - "- EXAMPLE 4:\n", - " \n", - " {train.loc[train[\"label_text\"] == \"sport\", \"text\"].iloc[10]}\n", - " \n", - " {train.loc[train[\"label_text\"] == \"sport\", \"label_text\"].iloc[10]}\n", - "\n", - "- EXAMPLE 5:\n", - " \n", - " {train.loc[train[\"label_text\"] == \"tech\", \"text\"].iloc[10]}\n", - " \n", - " {train.loc[train[\"label_text\"] == \"tech\", \"label_text\"].iloc[10]}\n", - "\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FMVdJ2EffcuC" - }, - "source": [ - "For the below evaluations, we'll use the respective functions we have already set up. For in-context learning, we recommend to use the validation set to find the optimal performance and then apply it to the test set, to make sure the metrics remain consistent. In this notebook, we'll directly evaluate on the test dataset, as the validation and prompt engineering part has been already done." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "42bc85d751c5" - }, - "source": [ - "### Model Configuration Setup\n", - "\n", - "We are going to define the generation configuration for doing the text classification task, and keep the same configuration across all of our experiments (both in-context and in-weights). \n", - "\n", - "We configure the temperature to 0, to make it as grounded as possible, and max output tokens to 10, as the categories are only one word, we don't need more than that.\n", - "\n", - "We are also going to set the safety filters to only block responses which have high severity scores across all four categories. 
For more information about the Safety configurations, please refer to the [official documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-attributes)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "b61b3754a752" - }, - "outputs": [], - "source": [ - "generation_config = GenerationConfig(max_output_tokens=10, temperature=0)\n", - "\n", - "safety_settings = {\n", - " HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", - " HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", - " HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", - " HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wF_tas6BfcuC" - }, - "source": [ - "### 3.1 Gemini 1.0 Pro in-context Evaluation\n", - "\n", - "#### Zero-Shot Evaluation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2460cc454327" - }, - "source": [ - "First we will compute the predictions using the frozen model with a prompt without examples (i.e. using the `system_prompt_zero_shot` prompt). " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "OLitVLOFqPQe" - }, - "outputs": [], - "source": [ - "gem_pro_1_model_zero = GenerativeModel(\n", - " \"gemini-1.0-pro-002\", # e.g. gemini-1.5-pro-001, gemini-1.5-flash-001\n", - " system_instruction=[system_prompt_zero_shot],\n", - " generation_config=generation_config,\n", - " safety_settings=safety_settings,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "cecce00b47f1" - }, - "source": [ - "We convert the texts we want to predict to a list and run the online inference parallelizing the calls." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "1D2vrHjawsZH" - }, - "outputs": [], - "source": [ - "# Get the list of messages to predict\n", - "messages_to_predict = test[\"text\"].to_list()\n", - "# Compute the predictions\n", - "predictions_zero_shot = batch_predict(\n", - " messages=messages_to_predict, model=gem_pro_1_model_zero, max_workers=4\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "00a367149153" - }, - "source": [ - "We store the predictions in the DataFrame we previously defined for storing all the evaluations." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ufTv6Jx0wwIZ" - }, - "outputs": [], - "source": [ - "df_evals[\"gem1.0-zero-shot_predictions\"] = predictions_zero_shot\n", - "len(predictions_zero_shot)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "f6964e41ed87" - }, - "source": [ - "We compute the evaluation metrics for each text, using the zero-shot prompt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "P0h3Tr0LwwBX" - }, - "outputs": [], - "source": [ - "# Compute Evaluation Metrics for zero-shot prompt\n", - "metrics_zero_shot = evaluate_predictions(\n", - " df_evals.copy(),\n", - " target_column=\"label_text\",\n", - " predictions_column=\"gem1.0-zero-shot_predictions\",\n", - " postprocessing=True,\n", - ")\n", - "metrics_zero_shot" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b9386224c78b" - }, - "source": [ - "We finally log the run in the experiment we created in Vertex AI Experiments." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Aa6HgjDNwv4y" - }, - "outputs": [], - "source": [ - "# Log Experiment with zero-shot Prompt with Gemini 1.0 Pro\n", - "params = {\n", - " \"model\": \"gemini-1.0-pro-002\",\n", - " \"adaptation_type\": \"in-context zero-shot\",\n", - " \"temperature\": 0,\n", - " \"max_output_tokens\": 10,\n", - "}\n", - "\n", - "experiment_manager.log_run(\n", - " run_name=\"gemini-1-0-pro-002-zero-shot\", params=params, metrics=metrics_zero_shot\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "40b221630b0f" - }, - "source": [ - "#### Few-shot Evaluation\n", - "\n", - "We will now conduct experiments adding examples to our prompt to steer the model. For this, we will use the `system_prompt_few_shot` prompt." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Znl2x6ZSw7cs" - }, - "outputs": [], - "source": [ - "# Test Few-Shot, and other prompts/possibilities\n", - "gem_pro_1_model_few = GenerativeModel(\n", - " \"gemini-1.0-pro-002\",\n", - " system_instruction=[system_prompt_few_shot],\n", - " generation_config=generation_config,\n", - " safety_settings=safety_settings,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "692f3a86f3ee" - }, - "source": [ - "We convert the texts we want to predict to a list and run the online inference parallelizing the calls." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NM7OU8hZfcuC" - }, - "outputs": [], - "source": [ - "predictions_few_shot = batch_predict(\n", - " messages=messages_to_predict, model=gem_pro_1_model_few\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d60c0128f81d" - }, - "source": [ - "We store the predictions on our designated DataFrame" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "AwUABEdnfcuC" - }, - "outputs": [], - "source": [ - "df_evals[\"gem1.0-few-shot_predictions\"] = predictions_few_shot\n", - "len(predictions_few_shot)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b3bc3de3735e" - }, - "source": [ - "We compute the evaluation metrics for each text, using the zero-shot prompt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "X6CrJ1NzfcuC" - }, - "outputs": [], - "source": [ - "# Compute Evaluation Metrics for few-shot prompt\n", - "metrics_few_shot = evaluate_predictions(\n", - " df_evals.copy(),\n", - " target_column=\"label_text\",\n", - " predictions_column=\"gem1.0-few-shot_predictions\",\n", - " postprocessing=True,\n", - ")\n", - "metrics_few_shot" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c9182f894d12" - }, - "source": [ - "And finally, we also log this run in our experiment, for comparison purposes." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Qvex8Jd2fcuC" - }, - "outputs": [], - "source": [ - "# Log Experiment with Few-Shot Prompt with Gemini 1.0 Pro\n", - "\n", - "params = {\n", - " \"model\": \"gemini-1.0-pro-002\",\n", - " \"adaptation_type\": \"in-context few-shot\",\n", - " \"temperature\": 0,\n", - " \"max_output_tokens\": 10,\n", - "}\n", - "\n", - "experiment_manager.log_run(\n", - " run_name=\"gemini-1-0-pro-few-shot\", params=params, metrics=metrics_few_shot\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Qs9eHiL5fcuD" - }, - "source": [ - "## 4. Fine-tuning (Parameter Efficient) Gemini 1.0 Pro\n", - "Supervised fine-tuning helps adapt foundation models to new tasks using smaller, highly relevant datasets. To ensure success, focus on:\n", - "\n", - "- Using domain-specific data: Choose data closely matching your real-world use case.\n", - "- Accurate labeling: High-quality annotations are crucial.\n", - "- Clean data: Remove duplicates, fix errors, and ensure relevance to your task.\n", - "- Diverse but focused examples: Include variety within your target domain, avoiding irrelevant data.\n", - "- Balanced classes (for classification): Maintain a balance to prevent bias towards a specific class.\n", - "\n", - "### 4.1 Prepare tuning and validation datasets for fine-tuning Gemini Models on Vertex AI\n", - "\n", - "Training data should be structured within a JSONL file located at a Google Cloud Storage (GCS) URI. Each line (or row) of the JSONL file must adhere to a specific schema: It should contain a \"messages\" array, with objects inside defining a \"role\" (\"system\" for the system context, \"user\" for user input or \"model\" for model output) and the corresponding text \"content\". 
For example, a valid data row would look like this:\n", - "\n", - "```\n", - "{\n", - " \"messages\": [\n", - " {\n", - " \"role\": \"system\",\n", - " \"content\": \"You should classify the text into one of the following classes:[business, entertainment]\"\n", - " },\n", - " { \"role\": \"user\", \"content\": \"Diversify your investment portfolio\" },\n", - " { \"role\": \"model\", \"content\": \"business\" }\n", - " ]\n", - "}\n", - "```\n", - "\n", - "The role \"system\" is optional. You can find more information about the dataset format and preparation in the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-about)\n", - "\n", - "To run a tuning job, you need to upload your tuning and validation (optional) datasets to a Cloud Storage bucket. You can either create a new Cloud Storage bucket or use an existing one to store dataset files. We recommend that you use a bucket that's in the same Google Cloud project where you plan to tune your model.\n", - "\n", - "\n", - "In this section, we will provide guidelines to prepare the training and validation (optional) datasets based on three options:\n", - "\n", - "1. [Option 1] From scratch, using the datasets we loaded and split at the beginning of this notebook.\n", - "\n", - "1. [Option 2] Providing a function to convert an AutoML Dataset on CSV format to the expected format to fine-tune and validate Gemini Models.\n", - "\n", - "1. [Option 3] Providing a function to convert an AutoML Dataset on JSONL format to the expected format to fine-tune and validate Gemini Models.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "QvhGIruMfcuD" - }, - "source": [ - "#### [Option 1] Prepare tuning and validation datasets from scratch\n", - "\n", - "We need to prepare our training and validation (optional) datasets for the text classification task. It is recommended to add a system role within the instructions on how to classify. 
Since we are going to fine-tune the model, the need to add few-shot examples as part of the prompt is eliminated, and therefore we will reuse the `system_prompt_zero_shot` that we used previously." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "a8b2056948df" - }, - "source": [ - "##### Prepare Tuning Dataset for fine-tuning Gemini\n", - "\n", - "We will create the tuning dataset by using our previously created `train` DataFrame, and formatting it in the expected structure." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "f72a891e5877" - }, - "outputs": [], - "source": [ - "tuning_gemini_df = prepare_tuning_dataset_from_df(\n", - " tuning_df=train, system_prompt=system_prompt_zero_shot\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d91368a68250" - }, - "source": [ - "Let's take a look at how it looks" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "f3951e8901a7" - }, - "outputs": [], - "source": [ - "tuning_gemini_df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "89451ccbabec" - }, - "source": [ - "We store this dataset in Google Cloud Storage to later on pass it when setting up the tuning job.\n", - "\n", - "The expected format is JSONL, thus we will convert the pandas DataFrame to JSONL when storing it on Cloud Storage." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bQQM8MZNfcuD" - }, - "outputs": [], - "source": [ - "# store tuning dataset in GCS\n", - "tuning_data_gcs_path = f\"gs://{BUCKET_NAME}/tuning_experiments/tuning_dataset_gemini.jsonl\" # @param {type: \"string\"}\n", - "\n", - "tuning_gemini_df.to_json(tuning_data_gcs_path, orient=\"records\", lines=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "47671b94765d" - }, - "source": [ - "In order to make sure it is in the expected format and we won't get later on errors when launching the tuning job, we'll use our custom function validate the dataset has the format and the roles required for tuning Gemini models.\n", - "\n", - "If the output is an empty list, it means there were no errors encountered. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "c7b717c38e76" - }, - "outputs": [], - "source": [ - "validate_gemini_tuning_jsonl(gcs_jsonl_path=tuning_data_gcs_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ab4c1c9e5b7a" - }, - "source": [ - "##### Prepare Validation Dataset for Fine-tuning Gemini 1.0 Pro\n", - "We do the same but now using the validation dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "b48ac0b8d5b0" - }, - "outputs": [], - "source": [ - "validation_gemini_df = prepare_tuning_dataset_from_df(\n", - " tuning_df=val, system_prompt=system_prompt_zero_shot\n", - ")\n", - "validation_gemini_df.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "c1553424ac2e" - }, - "outputs": [], - "source": [ - "validation_gemini_df.__len__()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3323aff3540c" - }, - "source": [ - "We store this dataset in Google Cloud Storage to later on pass it when setting up the tuning job.\n", - "\n", - "The expected format is JSONL, thus we will convert the 
pandas DataFrame to JSONL when storing it on Cloud Storage." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "pGnLQB24fcuD" - }, - "outputs": [], - "source": [ - "# store validation dataset in GCS\n", - "validation_data_gcs_path = f\"gs://{BUCKET_NAME}/tuning_experiments/validation_dataset_gemini.jsonl\" # @param {type: \"string\"}\n", - "validation_gemini_df.to_json(validation_data_gcs_path, orient=\"records\", lines=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "77e8f4c89828" - }, - "source": [ - "In order to make sure it is in the expected format and we won't get later on errors when launching the tuning job, we'll use our custom function validate the dataset has the format and the roles required for tuning Gemini models.\n", - "\n", - "If the output is an empty list, it means there were no errors encountered. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "5191953bb448" - }, - "outputs": [], - "source": [ - "validate_gemini_tuning_jsonl(gcs_jsonl_path=validation_data_gcs_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ebee4ef624a0" - }, - "source": [ - "------------------" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5D4sezIqfcuD" - }, - "source": [ - "#### [Option 2] Transform AutoML CSV training dataset format to the expected data format for fine-tuning Gemini.\n", - "If you were previously using Vertex AI AutoML for text classification, and you have your data in the below csv format expected by AutoML:\n", - "\n", - "```\n", - "[ml_use],gcs_file_uri|\"inline_text\",label\n", - "```\n", - "\n", - "```\n", - "test,\"inline_text\",label1\n", - "test,\"inline_text\",label2\n", - "training,\"inline_text\",label3\n", - "validation,\"inline_text\",label1\n", - "```\n", - "\n", - " In the file `data_transformations_tuning.py` we have the function ` def convert_tuning_dataset_from_automl_csv(...)` to convert AutoML CSV 
datasets for text classification to the format expected for the tuning dataset to fine-tune Gemini models." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "19f64486390c" - }, - "source": [ - "##### Prepare tuning dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "q5mzsiDl0CMq" - }, - "outputs": [], - "source": [ - "# Usage Example for Training dataset\n", - "gcs_path_automl_dataset = (\n", - " \"gs:///.csv\" # @param {type: \"string\"}\n", - ")\n", - "df_gemini_tuning = convert_tuning_dataset_from_automl_csv(\n", - " automl_gcs_csv_path=gcs_path_automl_dataset,\n", - " system_prompt=system_prompt_zero_shot,\n", - " partition=\"training\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ea54c13d61a8" - }, - "outputs": [], - "source": [ - "df_gemini_tuning.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "a4cd727c6fb4" - }, - "source": [ - "We store this dataset in Google Cloud Storage to later on pass it when setting up the tuning job.\n", - "\n", - "The expected format is JSONL, thus we will convert the pandas DataFrame to JSONL when storing it on Cloud Storage." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "c8f2ed3f7ecf" - }, - "outputs": [], - "source": [ - "# store tuning dataset in GCS\n", - "gemini_tuning_data_gcs_path = f\"gs://{BUCKET_NAME}/tuning_experiments/tuning_dataset_gemini.jsonl\" # @param {type: \"string\"}\n", - "df_gemini_tuning.to_json(gemini_tuning_data_gcs_path, orient=\"records\", lines=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1a64207f7529" - }, - "source": [ - "In order to make sure it is in the expected format and we won't get later on errors when launching the tuning job, we'll use our custom function validate the dataset has the format and the roles required for tuning Gemini models.\n", - "\n", - "If the output is an empty list, it means there were no errors encountered. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "05eff9b207e1" - }, - "outputs": [], - "source": [ - "validate_gemini_tuning_jsonl(gcs_jsonl_path=gemini_tuning_data_gcs_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5d93823802fa" - }, - "source": [ - "##### Prepare Validation Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c21d1d425c20" - }, - "source": [ - "We will repeat the same process for the validation dataset in case there is one available. It is not mandatory to provide a validation dataset when fine-tuning Gemini, but rather optional." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "RIBlBE2q0yYp" - }, - "outputs": [], - "source": [ - "# Usage Example for validation dataset\n", - "gcs_path_automl_dataset = (\n", - " \"gs:///.csv\" # @param {type: \"string\"}\n", - ")\n", - "df_gemini_validation = convert_tuning_dataset_from_automl_csv(\n", - " automl_gcs_csv_path=gcs_path_automl_dataset,\n", - " system_prompt=system_prompt_zero_shot,\n", - " partition=\"validation\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3384b71bef8a" - }, - "source": [ - "We store this dataset in Google Cloud Storage to later on pass it when setting up the tuning job.\n", - "\n", - "The expected format is JSONL, thus we will convert the pandas DataFrame to JSONL when storing it on Cloud Storage." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "0abb34eb080b" - }, - "outputs": [], - "source": [ - "# store tuning dataset in GCS\n", - "gemini_validation_data_gcs_path = f\"gs://{BUCKET_NAME}/tuning_experiments/validation_dataset_gemini.jsonl\" # @param {type: \"string\"}\n", - "df_gemini_validation.to_json(\n", - " gemini_validation_data_gcs_path, orient=\"records\", lines=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2d9afbf270ba" - }, - "source": [ - "In order to make sure it is in the expected format and we won't get later on errors when launching the tuning job, we'll use our custom function validate the dataset has the format and the roles required for tuning Gemini models.\n", - "\n", - "If the output is an empty list, it means there were no errors encountered. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "9492a4c71837" - }, - "outputs": [], - "source": [ - "validate_gemini_tuning_jsonl(gcs_jsonl_path=gemini_validation_data_gcs_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "191fbdd4bc30" - }, - "source": [ - "------------------------" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "KpuZz3jCfcuD" - }, - "source": [ - "#### [Option 3] AutoML JSONL training dataset format to Gemini tuning data format\n", - "\n", - "If you were previously using Vertex AI AutoML for text classification, and you have your data in the below JSONL format expected by AutoML:\n", - "\n", - "```\n", - "{\n", - " \"classificationAnnotation\": {\n", - " \"displayName\": \"label\"\n", - " },\n", - " \"textContent\": \"inline_text\",\n", - " \"dataItemResourceLabels\": {\n", - " \"aiplatform.googleapis.com/ml_use\": \"training|test|validation\"\n", - " }\n", - "}\n", - "{\n", - " \"classificationAnnotation\": {\n", - " \"displayName\": \"label2\"\n", - " },\n", - " \"textContent\": \"inline_text\",\n", - " \"dataItemResourceLabels\": {\n", - " \"aiplatform.googleapis.com/ml_use\": \"training|test|validation\"\n", - " }\n", - "}\n", - "```\n", - "\n", - " In the file `data_transformations_tuning.py` we have the function ` def convert_tuning_dataset_from_automl_jsonl(...)` to convert AutoML JSONL datasets for text classification to the format expected for the tuning dataset to fine-tune Gemini models." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "05b04e4270d8" - }, - "source": [ - "##### Prepare Tuning Dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3081dbe973e6" - }, - "outputs": [], - "source": [ - "# Usage Example for Training dataset\n", - "\n", - "gcs_path_automl_dataset = (\n", - " \"gs:///.jsonl\" # @param {type: \"string\"}\n", - ")\n", - "\n", - "df_gemini_tuning = convert_tuning_dataset_from_automl_jsonl(\n", - " project_id=PROJECT_ID,\n", - " automl_gcs_jsonl_path=gcs_path_automl_dataset,\n", - " system_prompt=system_prompt_zero_shot,\n", - " partition=\"training\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "368a0a0d26ea" - }, - "outputs": [], - "source": [ - "df_gemini_tuning" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1c01550efe4b" - }, - "source": [ - "We store this dataset in Google Cloud Storage to later on pass it when setting up the tuning job.\n", - "\n", - "The expected format is JSONL, thus we will convert the pandas DataFrame to JSONL when storing it on Cloud Storage." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "W0QMNGCvBR4f" - }, - "outputs": [], - "source": [ - "# store tuning dataset in GCS\n", - "gemini_tuning_data_gcs_path = f\"gs://{BUCKET_NAME}/tuning_experiments/tuning_dataset_gemini.jsonl\" # @param {type: \"string\"}\n", - "df_gemini_tuning.to_json(gemini_tuning_data_gcs_path, orient=\"records\", lines=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8e339802cd01" - }, - "source": [ - "In order to make sure it is in the expected format and we won't get later on errors when launching the tuning job, we'll use our custom function validate the dataset has the format and the roles required for tuning Gemini models.\n", - "\n", - "If the output is an empty list, it means there were no errors encountered. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ae1985e9a251" - }, - "outputs": [], - "source": [ - "validate_gemini_tuning_jsonl(gcs_jsonl_path=gemini_tuning_data_gcs_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "616487009b94" - }, - "source": [ - "##### Prepare Validation Dataset\n", - "Now we repeat the same process but with the validation dataset. When fine-tuning Gemini you can pass on two datasets: Training/Tuning Dataset (mandatory) and Validation Dataset (optional). If the validation dataset is provided, you can monitor also the metrics on this dataset during the tuning process, however providing a validaiton dataset is optional." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "e23edfa617ee" - }, - "outputs": [], - "source": [ - "gcs_path_automl_dataset = (\n", - " \"gs:///.jsonl\" # @param {type: \"string\"}\n", - ")\n", - "\n", - "df_gemini_validation = convert_tuning_dataset_from_automl_jsonl(\n", - " project_id=PROJECT_ID,\n", - " automl_gcs_jsonl_path=gcs_path_automl_dataset,\n", - " system_prompt=system_prompt_zero_shot,\n", - " partition=\"validation\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "a1e4aa7699a2" - }, - "source": [ - "We store this dataset in Google Cloud Storage to later on pass it when setting up the tuning job.\n", - "\n", - "The expected format is JSONL, thus we will convert the pandas DataFrame to JSONL when storing it on Cloud Storage." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "RWPTSFceBSJH" - }, - "outputs": [], - "source": [ - "# store tuning dataset in GCS\n", - "gemini_validation_data_gcs_path = f\"gs://{BUCKET_NAME}/tuning_experiments/validation_dataset_gemini.jsonl\" # @param {type: \"string\"}\n", - "df_gemini_validation.to_json(\n", - " gemini_validation_data_gcs_path, orient=\"records\", lines=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "41b110afaf03" - }, - "source": [ - "In order to make sure it is in the expected format and we won't get later on errors when launching the tuning job, we'll use our custom function validate the dataset has the format and the roles required for tuning Gemini models.\n", - "\n", - "If the output is an empty list, it means there were no errors encountered. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "5c94d573bba2" - }, - "outputs": [], - "source": [ - "validate_gemini_tuning_jsonl(gcs_jsonl_path=gemini_validation_data_gcs_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "eff7b6438ae2" - }, - "source": [ - "---------------------------" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "51vqjijWfcuD" - }, - "source": [ - "### 4.2 Start fine-tuning job\n", - "\n", - "- source_model: Specifies the base Gemini model version you want to fine-tune.\n", - "- train_dataset: Path to your training data in JSONL format.\n", - "\n", - "Optional parameters\n", - "\n", - "- validation_dataset: If provided, this data is used to evaluate the model during tuning.\n", - "- adapter_size: A higher adapter size means more trainable parameters.\n", - "- epochs: The number of training epochs to run.\n", - "- learning_rate_multiplier: A value to scale the learning rate during training.\n", - "\n", - "We recommend to make a different set of experiments with different hyperparameter. 
The below configurations are recommended to experiment based on our experiments, if your dataset is in the size of 1000s and you are including the system role in your dataset.\n", - "\n", - "1. epochs: 4, learning_rate_multiplier: 1, adapter_size: 1\n", - "1. epochs: 12, learning_rate_multiplier: 4, adapter_size: 1\n", - "\n", - "If you are not including system role in your dataset, and only role user with the raw text and role models with the label, then we recommend to increase the adapter size. The below are some configurations you can start experimenting with.\n", - "\n", - "1. epochs: 12, learning_rate_multiplier: 4, adapter_size: 4\n", - "1. epochs: 24, learning_rate_multiplier: 4, adapter_size: 4\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fe0d3b83aeed" - }, - "source": [ - "First, we set the parameters values for the first fine tuning job. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bcb7bd25c14d" - }, - "outputs": [], - "source": [ - "# Tune a model using `train` method.\n", - "\n", - "tuned_model_name = \"\" # @param {type: \"string\"}\n", - "epochs = 4 # @param\n", - "learning_rate_multiplier = 1 # @param\n", - "adapter_size = 1 # @param" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d82dfffa4d67" - }, - "source": [ - "Now, we trigger the tuning job. After running the below cell, you'll get a link to the console where you can monitor the tuning job such as metrics, and get statistics of the dataset used for tuning. After the tuning job finishes, you can also find the details for it." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "fhfUjyF2fcuD" - }, - "outputs": [], - "source": [ - "sft_tuning_job = sft.train(\n", - " tuned_model_display_name=tuned_model_name,\n", - " source_model=\"gemini-1.0-pro-002\",\n", - " train_dataset=tuning_data_gcs_path,\n", - " # Optional:\n", - " validation_dataset=validation_data_gcs_path,\n", - " epochs=epochs,\n", - " learning_rate_multiplier=learning_rate_multiplier,\n", - " adapter_size=adapter_size,\n", - ")\n", - "\n", - "# Get the tuning job info.\n", - "sft_tuning_job.to_dict()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "vIgb_lalfcuD" - }, - "outputs": [], - "source": [ - "# Get the resource name of the tuning job\n", - "sft_tuning_job_name = sft_tuning_job.resource_name\n", - "sft_tuning_job_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7xxo0CWRfcuD" - }, - "source": [ - "### 4.3 Get the tuned model and test it\n", - "\n", - "To retrieve the full path from the console. You can go to [Vertex AI Studio tuning section](https://console.cloud.google.com/vertex-ai/generative/language/tuning?_ga=2.250955014.1608754049.1722498783-327343626.1722249232) and select the region where you launched your job, click on your tuning job and go to details. The last part of the Tuning Job path is the tuning job ID. Alternatively, you can also select the entire path and replace it directly as an argument for `sft.SupervisedTuningJob(...)`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7bHSuQEZfcuD" - }, - "outputs": [], - "source": [ - "# Get tuning job\n", - "TUNING_JOB_ID = \"\" # @param example 952462564720115710\n", - "sft_tuning_job = sft.SupervisedTuningJob(\n", - " f\"projects/{PROJECT_ID}/locations/{LOCATION}/tuningJobs/{TUNING_JOB_ID}\"\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ahcGD1KxfcuD" - }, - "outputs": [], - "source": [ - "# tuned model endpoint name\n", - "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", - "tuned_model_endpoint_name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LchxG2QufcuD" - }, - "outputs": [], - "source": [ - "# tuned model name\n", - "tuned_model_name = sft_tuning_job.tuned_model_name\n", - "tuned_model_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bfc143bd2866" - }, - "source": [ - "Initiate the tuned model and test it on a single example. We will use the same generation and safety configuration as when doing in-context learning." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "e00bee65f433" - }, - "outputs": [], - "source": [ - "tuned_gemini_pro = GenerativeModel(\n", - " tuned_model_endpoint_name,\n", - " system_instruction=[system_prompt_zero_shot],\n", - " generation_config=generation_config,\n", - " safety_settings=safety_settings,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "OOzD7dG2fcuE" - }, - "outputs": [], - "source": [ - "response = tuned_gemini_pro.generate_content([test[\"text\"].iloc[4]], stream=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "xZw7iucdfcuE" - }, - "outputs": [], - "source": [ - "print(\"predicted\", response.text)\n", - "print(\"ground truth\", test[\"label_text\"].iloc[4])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pbOY6eEgfcuE" - }, - "source": [ - "### 4.4 Run evaluations on tuned model and log experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "-8tVNr4SfcuE" - }, - "outputs": [], - "source": [ - "# Get the list of messages to predict\n", - "messages_to_predict = test[\"text\"].to_list()\n", - "# Compute the predictions using the zero-shot prompt\n", - "predictions_tuned_model = batch_predict(\n", - " messages=messages_to_predict, model=tuned_gemini_pro, max_workers=4\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "JX-t7KXxfcuE" - }, - "outputs": [], - "source": [ - "df_evals[\"tuned-gem1.0-ep4-lrm1-rank4\"] = predictions_tuned_model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "4ri9X1ZOfcuE" - }, - "outputs": [], - "source": [ - "metrics_tuned_gemini = evaluate_predictions(\n", - " df_evals.copy(),\n", - " target_column=\"label_text\",\n", - " predictions_column=\"tuned-gem1.0-ep4-lrm1-rank4\",\n", - " postprocessing=True,\n", - ")\n", - 
"metrics_tuned_gemini" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "R0DWM0nmfcuE" - }, - "outputs": [], - "source": [ - "# Log Experiment with zero-shot Prompt with Gemini 1.5 Pro\n", - "\n", - "params = {\n", - " \"model\": tuned_model_name,\n", - " \"adaptation_type\": \"fine-tuning Gemini 1.0 Pro 002\",\n", - " \"temperature\": 0,\n", - " \"max_output_tokens\": 10,\n", - " \"epochs\": epochs,\n", - " \"lrm\": learning_rate_multiplier,\n", - " \"adapter_size\": adapter_size,\n", - "}\n", - "\n", - "experiment_manager.log_run(\n", - " run_name=\" Note: In the experiments with this dataset the most performant model was achieved by fine-tuning Gemini 1.0 Pro with the below parameters:\n", - "\n", - "```\n", - "epochs=6, learning_rate_multiplier= 1, and adapter_size=4\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pZzNV7fnuJsy" - }, - "source": [ - "## [Optional] 6. Heuristics for Computing Confidence Scores\n", - "\n", - "Due to the multitask essence of LLMs computing confidence scores is not as straightforward as it is with traditional predictive AI. Gemini models do not expose logprobs for the time being. However, the below snippets provide some options to use as a proxy for confidence scores in your predictions. You can expand these options to your own use cases and needs.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "KW7wPWQWuQT4" - }, - "source": [ - "### [Option 1] - Getting multiple responses from the model and generate a majority voting ratio\n", - "\n", - "The overall idea is to generate different answers with the same model. Then pick the most \"voted/returned\" answer, and calculate its \"confidence score\" by dividing the number of votes among the total number of responses/candidates." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "acbe5d5c8a48" - }, - "source": [ - "First we will define the function that will help us do the prediction and the numerical confidence." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rViPbiseuOmr" - }, - "outputs": [], - "source": [ - "def get_prediction_with_numeric_score(\n", - " text_to_predict: str, model: Any, candidate_counts: int\n", - ") -> dict[str, float | str]:\n", - " \"\"\"\n", - " Generates multiple predictions from a model and determines\n", - " the most frequent response along with its confidence score.\n", - "\n", - " Args:\n", - " text_to_predict: The input text for which to generate predictions.\n", - " model: The prediction model to use.\n", - " candidate_counts: The number of predictions to generate.\n", - "\n", - " Returns:\n", - " A dictionary containing the majority prediction and its confidence score.\n", - " For example: {\"prediction\": \"business\", \"confidence_score\": 0.75}\n", - " \"\"\"\n", - " responses = []\n", - " for _ in range(candidate_counts):\n", - " responses.append(model.generate_content(text_to_predict).text)\n", - "\n", - " counts = Counter(responses)\n", - " max_value = max(counts.values())\n", - " majority_response = [key for key in counts if counts[key] == max_value][0]\n", - " confidence = max_value / len(responses)\n", - " result = {\"prediction\": majority_response, \"confidence_score\": confidence}\n", - " return result" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2960c03f3be7" - }, - "source": [ - "Initialize the model to predict the class. In this example, we will use the tuned Gemini model we created before. We will use the same configurations used when doing in-context and in-weights learning." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "8ae1e8c36764" - }, - "outputs": [], - "source": [ - "tuned_gemini_pro = GenerativeModel(\n", - " tuned_model_endpoint_name,\n", - " system_instruction=[system_prompt_zero_shot],\n", - " generation_config=generation_config,\n", - " safety_settings=safety_settings,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7a1d74a2a393" - }, - "source": [ - "Get the predictions with its corresponding confidence score for an example text in our test dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wiN6cX3VuXeE" - }, - "outputs": [], - "source": [ - "res = get_prediction_with_numeric_score(\n", - " text_to_predict=test[\"text\"].iloc[473], model=tuned_gemini_pro, candidate_counts=4\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8796e7d50560" - }, - "source": [ - "Print the response and the ground truth for comparison purposes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "iSVIIVHquYOL" - }, - "outputs": [], - "source": [ - "print(\"Predicted Response with Confidence Score: \\n\", res)\n", - "print(\"Ground Truth:\\n\", test[\"label_text\"].iloc[473])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NEWe0M-juZyx" - }, - "source": [ - "### [Option 2] - Generating \"Verbal Confidences\" with an LLM\n", - "\n", - "The idea is to make 2-calls per prediction, one for predicting the class, and a second one to ask the LLM to judge how confident it is about it, giving as options verbal confidences like \"low\", \"medium\" and \"high\".\n", - "\n", - "In this example, we will use our tuned Gemini model to predict the class and frozen Gemini 1.5 Pro to judge the prediction verbally.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4593a06d3708" - }, - "source": [ - "First we will define the function that will help us do the 
prediction and the verbal confidence." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "HGWzgnx1udB0" - }, - "outputs": [], - "source": [ - "def get_prediction_with_verbal_score(\n", - " text_to_predict: str,\n", - " model_to_predict_class: Any,\n", - " model_to_eval_prediction: Any,\n", - " possible_classes: list[str] = [\n", - " \"business\",\n", - " \"entertainment\",\n", - " \"sport\",\n", - " \"tech\",\n", - " \"politics\",\n", - " ],\n", - ") -> dict[str, str]:\n", - " \"\"\"\n", - " Generates a prediction and then evaluates its confidence using a separate model.\n", - "\n", - " Args:\n", - " text_to_predict: The input text for which to generate predictions.\n", - " model_to_predict_class: The model to predict the class.\n", - " model_to_eval_prediction: The model to evaluate the confidence of the prediction.\n", - " possible_classes: A list of possible classes.\n", - "\n", - " Returns:\n", - " A dictionary containing the prediction and its verbal confidence score.\n", - " For example: {\"prediction\": \"business\", \"verbal_score\": \"very confident\"}\n", - " \"\"\"\n", - " prediction = model_to_predict_class.generate_content(text_to_predict).text\n", - " remaining_classes = possible_classes.copy()\n", - " remaining_classes.remove(prediction)\n", - " formatted_prompt = f\"\"\"\n", - " TEXT:\n", - " {text_to_predict}\n", - "\n", - " PREDICTED CLASS:\n", - " {prediction}\n", - "\n", - " OTHER POSSIBLE CLASSES:\n", - " {remaining_classes}\n", - " \"\"\"\n", - " confidence = model_to_eval_prediction.generate_content(formatted_prompt).text\n", - " result = {\"prediction\": prediction, \"verbal_score\": confidence}\n", - " return result" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d31f34979788" - }, - "source": [ - "Configure the model parameters and initialize the model to predict the class. In this example, we will use the tuned Gemini model we created before. 
We will use the same configurations used when doing in-context and in-weights learning." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "S8ginB-BufDT" - }, - "outputs": [], - "source": [ - "model_to_predict_class = GenerativeModel(\n", - " tuned_model_endpoint_name,\n", - " system_instruction=[system_prompt_zero_shot],\n", - " generation_config=generation_config,\n", - " safety_settings=safety_settings,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "778511fcc385" - }, - "source": [ - "Define the Prompt to steer the evaluation and verbal confidence." - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": { - "id": "USxu9I0nugwU" - }, - "outputs": [], - "source": [ - "# Define the configurations for the model which will evaluate the predictions\n", - "\n", - "eval_prompt = \"\"\"\n", - "You will get a text about a particular topic, the predicted class for the topic and a list of the other different classes that the model could have chosen.\n", - "Your task is to judge how well the predicted class fitted the text, based on the other possible classes.\n", - "You need to evaluate and judge your prediction, indicating how confident you are with your answer. 
You will judge the prediction as follows:\n", - "\n", - "- If you are confident the text is correctly labeled with the given prediction, then respond with \"High\"\n", - "- If it can be that the model could match other classes, or you are not very sure the class corresponds to the text, then respond with \"Medium\"\n", - "- If you believe it makes no sense the class predicted for that text, then respond with \"Low\".\n", - "\n", - "You MUST only output \"High\", \"Medium\" or \"Low\" without any further explanation.\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7b1573955467" - }, - "source": [ - "Initialize the model to be used for computing the verbal confidence" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "1e6557bf4b49" - }, - "outputs": [], - "source": [ - "model_to_eval_class = GenerativeModel(\n", - " \"gemini-1.5-pro-001\",\n", - " system_instruction=[eval_prompt],\n", - " generation_config=generation_config,\n", - " safety_settings=safety_settings,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "79e668565aab" - }, - "source": [ - "Call the function to compute the prediction including its verbal score" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bf8liMlCumTl" - }, - "outputs": [], - "source": [ - "# Call the function to get the predictions with verbal score\n", - "res_verbal_conf = get_prediction_with_verbal_score(\n", - " text_to_predict=test[\"text\"].iloc[473],\n", - " model_to_predict_class=model_to_predict_class,\n", - " model_to_eval_prediction=model_to_eval_class,\n", - " possible_classes=[\"business\", \"entertainment\", \"sport\", \"tech\", \"politics\"],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "sUHITtnXukCx" - }, - "outputs": [], - "source": [ - "print(\"Predicted Response with Verbal Score: \\n\", res_verbal_conf)\n", - "print(\"Ground 
Truth:\\n\", test[\"label_text\"].iloc[473])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "HPX8n4Wp0_uN" - }, - "source": [ - "## Cleaning up" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8-SUNrp_1MJ3" - }, - "source": [ - "To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\n", - "project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n", - "\n", - "\n", - "Otherwise, you can delete the individual resources you created in this tutorial.\n", - "\n", - "Refer to this [instructions](https://cloud.google.com/vertex-ai/docs/tutorials/image-classification-custom/cleanup#delete_resources) to delete the resources from console." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "TzpTkkai1LG7" - }, - "outputs": [], - "source": [ - "# Delete Experiment.\n", - "delete_experiments = True\n", - "if delete_experiments:\n", - " experiments_list = aiplatform.Experiment.list()\n", - " for experiment in experiments_list:\n", - " if experiment.resource_name == EXPERIMENT_NAME:\n", - " print(experiment.resource_name)\n", - " experiment.delete()\n", - " break\n", - "\n", - "print(\"***\" * 10)\n", - "\n", - "# Delete Endpoint.\n", - "delete_endpoint = True\n", - "# If force is set to True, all deployed models on this\n", - "# Endpoint will be first undeployed.\n", - "if delete_endpoint:\n", - " for endpoint in aiplatform.Endpoint.list():\n", - " if endpoint.resource_name == tuned_model_endpoint_name:\n", - " print(endpoint.resource_name)\n", - " endpoint.delete(force=True)\n", - " break\n", - "\n", - "print(\"***\" * 10)\n", - "\n", - "# Delete Model.\n", - "delete_model = True\n", - "if delete_model:\n", - " # Remove version from model name.\n", - " tuned_model_name = tuned_model_name.split(\"@\")[0]\n", - " for model in aiplatform.Model.list():\n", - " if model.resource_name == 
tuned_model_name:\n", - " print(model.resource_name)\n", - " model.delete()\n", - " break\n", - "\n", - "print(\"***\" * 10)\n", - "\n", - "# Delete Cloud Storage Bucket.\n", - "delete_bucket = True\n", - "if delete_bucket:\n", - " ! gsutil -m rm -r $BUCKET_URI" - ] - } - ], - "metadata": { - "colab": { - "name": "gemini_supervised_finetuning_text_classification.ipynb", - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/gemini/tuning/gemini_supervised_tuning_qa.ipynb b/gemini/tuning/gemini_supervised_tuning_qa.ipynb deleted file mode 100644 index 3d96dc78b7..0000000000 --- a/gemini/tuning/gemini_supervised_tuning_qa.ipynb +++ /dev/null @@ -1,920 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "4tXEYL-zSWru" - }, - "outputs": [], - "source": [ - "# Copyright 2024 Google LLC\n", - "#\n", - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rDcI0SYySZiu" - }, - "source": [ - "# Supervised Fine Tuning with Gemini for Question & Answering\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \"Google
Open in Colab\n", - "
\n", - "
\n", - " \n", - " \"Google
Open in Colab Enterprise\n", - "
\n", - "
\n", - " \n", - " \"Vertex
Open in Vertex AI Workbench\n", - "
\n", - "
\n", - " \n", - " \"GitHub
View on GitHub\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "97b1d7579d2b" - }, - "source": [ - "| | |\n", - "|-|-|\n", - "| Author(s) | [Erwin Huizenga](https://github.com/erwinh85) |" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "l5LXfKSgShfJ" - }, - "source": [ - "## Overview\n", - "This notebook demonstrates fine-tuning the Gemini generative model using the Vertex AI Supervised Tuning feature. Supervised Tuning allows you to use your training data to refine the base model's capabilities toward specific tasks.\n", - "\n", - "Supervised Tuning uses labeled examples to tune a model. Each example demonstrates the output you want from your text model during inference.\n", - "\n", - "- Data Preparation: Your role is crucial in ensuring your training data is high-quality, well-labeled, and directly relevant to the target task. The quality of the data can significantly impact the model's performance and the presence of bias in the fine-tuned model, underscoring the importance of your contribution.\n", - "- Training: This phase presents an exciting opportunity to experiment with different configurations, allowing you to optimize the model's performance on the target task. Your creativity and innovation can lead to significant improvements in the model's capabilities.\n", - "- Evaluation:\n", - " - Metric: Choose appropriate evaluation metrics that accurately reflect the success of the fine-tuned model for your specific task\n", - " - Evaluation Set: Use a separate set of data to evaluate the model's performance\n", - "\n", - "### Recommended configurations\n", - "The following table shows the recommended configurations for tuning a foundation model by task:\n", - "\n", - "| Task | No. 
of examples in dataset | Number of epochs |\n", - "| -------------- | -------------------------- | ----------- |\n", - "| Classification | 500+ | 2-4 |\n", - "| Summarization | 1000+ | 2-4 |\n", - "| Extractive QA | 500+ | 2-4 |\n", - "| Chat | 1000+ | 2-4 |\n", - "\n", - "Before running this notebook, ensure you have:\n", - "\n", - "- A Google Cloud project: Provide your project ID in the `PROJECT_ID` variable.\n", - "\n", - "- Authenticated your Colab environment: Run the authentication code block at the beginning.\n", - "\n", - "- Prepared training data: Data should be formatted in JSON Lines with prompts and corresponding completions." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2D59iF36T62k" - }, - "source": [ - "## Getting Started" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4fEBa5FbT-dc" - }, - "source": [ - "### Install Vertex AI SDK and other required packages" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "0M04I5j3_KY5" - }, - "outputs": [], - "source": [ - "!pip3 install --upgrade --user --quiet google-cloud-aiplatform" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FLXs74x8UD6A" - }, - "source": [ - "### Restart runtime (Colab only)\n", - "\n", - "To use the newly installed packages, you must restart the runtime on Google Colab." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "8nnS20eqUJSK" - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "if \"google.colab\" in sys.modules:\n", - " import IPython\n", - "\n", - " app = IPython.Application.instance()\n", - " app.kernel.do_shutdown(True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qoh03Du8UNAn" - }, - "source": [ - "
\n", - "⚠️ The kernel is going to restart. Please wait until it is finished before continuing to the next step. ⚠️\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ID0JD1lUUPmx" - }, - "source": [ - "## Authenticate your notebook environment (Colab only)\n", - "\n", - "If you are running this notebook on Google Colab, run the cell below to authenticate your environment." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "id": "t49529b_UZJ0" - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "if \"google.colab\" in sys.modules:\n", - " from google.colab import auth\n", - "\n", - " auth.authenticate_user()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JKcjFI9iUa2T" - }, - "source": [ - "- If you are running this notebook in a local development environment:\n", - " - Install the [Google Cloud SDK](https://cloud.google.com/sdk).\n", - " - Obtain authentication credentials. Create local credentials by running the following command and following the oauth2 flow (read more about the command [here](https://cloud.google.com/sdk/gcloud/reference/beta/auth/application-default/login)):\n", - "\n", - " ```bash\n", - " gcloud auth application-default login\n", - " ```" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MyKgURhIUnAM" - }, - "source": [ - "## Set Project and Location\n", - "\n", - "First, you will have to set your project_id, location, and bucket_name. You can also use an existing bucket within the project." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "4acO9tVcU1Ey" - }, - "outputs": [], - "source": [ - "PROJECT_ID = \"[your-project]\" # @param {type:\"string\"}\n", - "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", - "\n", - "BUCKET_NAME = \"[your-bucket]\" # @param {type:\"string\"}\n", - "BUCKET_URI = f\"gs://{BUCKET_NAME}\"" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "udWd9pp7YTSb" - }, - "outputs": [], - "source": [ - "if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"\":\n", - " BUCKET_NAME = \"vertex-\" + UUID\n", - " BUCKET_URI = f\"gs://{BUCKET_NAME}\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aVe_7ETAasWS" - }, - "source": [ - "**warning**: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "z6L_c9lwatsw" - }, - "outputs": [], - "source": [ - "! gsutil mb -l $REGION -p $PROJECT_ID $BUCKET_URI" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bJllsmMiavrH" - }, - "source": [ - "Finally, validate access to your Cloud Storage bucket by examining its contents:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "87Mrn9G6ayl3" - }, - "outputs": [], - "source": [ - "! 
gsutil ls -al $BUCKET_URI" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "AkBZ-e85UeiI" - }, - "source": [ - "## Import Libraries" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "jsnIinC4UfZq" - }, - "outputs": [], - "source": [ - "import vertexai\n", - "from vertexai.generative_models import GenerativeModel\n", - "from vertexai.preview.tuning import sft\n", - "\n", - "vertexai.init(project=PROJECT_ID, location=LOCATION)\n", - "\n", - "import datetime\n", - "\n", - "from google.cloud import bigquery\n", - "import pandas as pd\n", - "from sklearn.model_selection import train_test_split" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nbaPSIO4_iur" - }, - "source": [ - "## Supervised fine tuning with Gemini on a question and answer dataset\n", - "\n", - "Now it's time for you to create a tuning job. You will be using a Q&A with a context dataset in JSON format.\n", - "\n", - "Supervised fine-tuning offers a solution, allowing focused adaptation of foundation models to new tasks. You can create a supervised text model tuning job using the Google Cloud console, API, or the Vertex AI SDK for Python. You can read more on our [documentation page](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-use-supervised-tuning),\n", - "\n", - "But how do you ensure your data is primed for success with supervised fine-tuning? Here's a breakdown of critical areas to focus on:\n", - "\n", - "- **Domain Alignment:** Supervised fine-tuning thrives on smaller datasets, but they must be highly relevant to your downstream task. Seek out data that closely mirrors the domain you will encounter in real-world use cases.\n", - "- **Labeling Accuracy:** Noisy labels will sabotage even the best technique. Prioritize accuracy in your annotations and labeling.\n", - "- **Noise Reduction:** Outliers, inconsistencies, or irrelevant examples hurt model adaptation. 
Implement preprocessing, such as removing duplicates, fixing typos, and verifying that data conforms to your task's expectations.\n", - "- **Distribution:** A diverse range of examples will help your model generalize better within the confines of your target task. Refrain from overloading the process with excessive variance that strays from your core domain.\n", - "- **Balanced Classes:** For classification tasks, try to keep a reasonable balance between different classes to avoid the model learning biases towards a specific class\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ivFjWO5M-Z8H" - }, - "source": [ - "### Fetching data from BigQuery\n", - "💾 Your model tuning dataset must be in a JSONL format where each line contains a single training example. You must make sure that you include instructions.\n", - "\n", - "You will use the [StackOverflow dataset](https://cloud.google.com/blog/topics/public-datasets/google-bigquery-public-datasets-now-include-stack-overflow-q-a) on BigQuery Public Datasets, limiting to questions with the `python` tag, and accepted answers for answers since 2020-01-01.\n", - "\n", - "You will use a helper function to read the data from BigQuery and create a Pandas dataframe." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "id": "2JIlL-aVbNPg" - }, - "outputs": [], - "source": [ - "def run_bq_query(sql: str) -> str | pd.DataFrame:\n", - " \"\"\"\n", - " Run a BigQuery query and return the job ID or result as a DataFrame\n", - " Args:\n", - " sql: SQL query, as a string, to execute in BigQuery\n", - " Returns:\n", - " df: DataFrame of results from query, or error, if any\n", - " \"\"\"\n", - "\n", - " bq_client = bigquery.Client(project=PROJECT_ID)\n", - "\n", - " # Try dry run before executing query to catch any errors\n", - " job_config = bigquery.QueryJobConfig(dry_run=True, use_query_cache=False)\n", - " bq_client.query(sql, job_config=job_config)\n", - "\n", - " # If dry run succeeds without errors, proceed to run query\n", - " job_config = bigquery.QueryJobConfig()\n", - " client_result = bq_client.query(sql, job_config=job_config)\n", - "\n", - " job_id = client_result.job_id\n", - "\n", - " # Wait for query/job to finish running. then get & return DataFrame\n", - " df = client_result.result().to_arrow().to_pandas()\n", - " print(f\"Finished job_id: {job_id}\")\n", - "\n", - " return df" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "11WLzqp-b59c" - }, - "source": [ - "Next you will write the query. For now you will limit our example to 550." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "gLC_elwzb3ZF" - }, - "outputs": [], - "source": [ - "stack_overflow_df = run_bq_query(\n", - " \"\"\"SELECT\n", - " CONCAT(q.title, q.body) AS input_text,\n", - " a.body AS output_text\n", - " FROM `bigquery-public-data.stackoverflow.posts_questions` q\n", - " JOIN `bigquery-public-data.stackoverflow.posts_answers` a\n", - " ON q.accepted_answer_id = a.id\n", - " WHERE q.accepted_answer_id IS NOT NULL\n", - " AND REGEXP_CONTAINS(q.tags, \"python\")\n", - " AND a.creation_date >= \"2020-01-01\"\n", - " LIMIT 550\n", - " \"\"\"\n", - ")\n", - "\n", - "stack_overflow_df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b404hW8jcRDQ" - }, - "source": [ - "There should be 550 questions and answers." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mUg-lF61cUVI" - }, - "outputs": [], - "source": [ - "print(len(stack_overflow_df))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yHda8BzbmRMC" - }, - "source": [ - "#### Adding instructions\n", - "Finetuning language models on a collection of datasets phrased as instructions have been shown to improve model performance and generalization to unseen tasks [(Google, 2022)](https://arxiv.org/pdf/2210.11416.pdf).\n", - "\n", - "An instruction refers to a specific directive or guideline that conveys a task or action to be executed. These instructions can be expressed in various forms, such as step-by-step procedures, commands, or rules. When we don't use the instructions, it's only a question and answer. The instruction tells the large language model what to do. We want them to answer the question. We have to give a hint about the task we want to perform. Let's extend the dataset with an instruction." 
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "id": "XIy7BjKWmu5j" - }, - "outputs": [], - "source": [ - "INSTRUCTION_TEMPLATE = \"\"\"\\\n", - "You are a helpful Python developer \\\n", - "You are good at answering StackOverflow questions \\\n", - "Your mission is to provide developers with helpful answers that work\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-tM_f1b3n4TK" - }, - "source": [ - "You will create a new column for the `INSTRUCTION_TEMPLATE`. Use a new column and do not overwrite the existing one, which you might want to use later." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "UJpAJG8uoE7F" - }, - "outputs": [], - "source": [ - "stack_overflow_df[\"input_text_instruct\"] = INSTRUCTION_TEMPLATE\n", - "\n", - "stack_overflow_df.head(2)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MNMMxaB2cZvY" - }, - "source": [ - "Next, you will randomly split the data into training and evaluation. For Extractive Q&A tasks, we advise 500+ training examples. In this case, you will use 440 to generate a tuning job that runs faster. \n", - "\n", - "20% of your dataset will be used for test. The `random_state` controls the shuffling applied to the data before applying the split. Pass an int for reproducible output across multiple function calls. Feel free to adjust this. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qdrweRsscfgU" - }, - "outputs": [], - "source": [ - "# split is set to 80/20\n", - "train, evaluation = train_test_split(stack_overflow_df, test_size=0.2, random_state=42)\n", - "\n", - "print(len(train))\n", - "print(len(evaluation))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b_MuRRGmfLni" - }, - "source": [ - "#### Generating the JSONL files\n", - "\n", - "Prepare your training data in a JSONL (JSON Lines) file and store it in a Google Cloud Storage (GCS) bucket. 
This format ensures efficient processing. Each line of the JSONL file must represent a single data instance and follow a well-defined schema:\n", - "\n", - "`{\"messages\": [{\"role\": \"system\", \"content\": \"instructions\"}, {\"role\": \"user\", \"content\": \"question\"}, {\"role\": \"model\", \"content\": \"answering\"}]}`\n", - "\n", - "This is how it maps to the Pandas df columns:\n", - "\n", - "* `instructions -> input_text_instruct`\n", - "* `question -> input_text`\n", - "* `answer -> output_text`\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "id": "fgPXoXOlc0vI" - }, - "outputs": [], - "source": [ - "date = datetime.datetime.now().strftime(\"%H:%d:%m:%Y\")\n", - "\n", - "tuning_data_filename = f\"tune_data_stack_overflow_qa-{date}.jsonl\"\n", - "validation_data_filename = f\"validation_data_stack_overflow_qa-{date}.jsonl\"" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "id": "9-oHmx0wfElN" - }, - "outputs": [], - "source": [ - "def format_messages(row):\n", - " \"\"\"Formats a single row into the desired JSONL structure\"\"\"\n", - " return {\n", - " \"messages\": [\n", - " {\"role\": \"system\", \"content\": row[\"input_text_instruct\"]},\n", - " {\"role\": \"user\", \"content\": row[\"input_text\"]},\n", - " {\"role\": \"model\", \"content\": row[\"output_text\"]},\n", - " ]\n", - " }" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "id": "8mBwn2jJEkYl" - }, - "outputs": [], - "source": [ - "# Apply formatting function to each row, then convert to JSON Lines format\n", - "tuning_data = train.apply(format_messages, axis=1).to_json(orient=\"records\", lines=True)\n", - "\n", - "# Save the result to a JSONL file\n", - "with open(tuning_data_filename, \"w\") as f:\n", - " f.write(tuning_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yz9IbouGftaZ" - }, - "source": [ - "Next you can check if the number of rows match with your Pandas 
df." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "w4JfgAijikHp" - }, - "outputs": [], - "source": [ - "with open(tuning_data_filename) as f:\n", - " num_rows = sum(1 for _ in f)\n", - "\n", - "print(\"Number of rows in the JSONL file:\", num_rows)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "42u53mHQVZk3" - }, - "source": [ - "You will do the same for the validation dataset." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": { - "id": "nBc6ufE0h2zL" - }, - "outputs": [], - "source": [ - "# Apply formatting function to each row, then convert to JSON Lines format\n", - "validation_data = evaluation.apply(format_messages, axis=1).to_json(\n", - " orient=\"records\", lines=True\n", - ")\n", - "\n", - "# Save the result to a JSONL file\n", - "with open(validation_data_filename, \"w\") as f:\n", - " f.write(validation_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "sYGr7h_2ahqb" - }, - "source": [ - "Next, you will copy the JSONL files into the Google Cloud Storage bucket you specified or created at the beginning of the notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "eq0MYC6nxhKy" - }, - "outputs": [], - "source": [ - "!gsutil cp $tuning_data_filename $validation_data_filename $BUCKET_URI" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "cBq0NMIxa2iD" - }, - "source": [ - "Next you can check if the files are in the bucket." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "cVel0g6pkOiA" - }, - "outputs": [], - "source": [ - "!gsutil ls -al $BUCKET_URI" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hsc0xhNGa7ZQ" - }, - "source": [ - "Now, you will create two variables for the data.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": { - "id": "tXzEZFjtkTWJ" - }, - "outputs": [], - "source": [ - "TUNING_DATA_URI = f\"{BUCKET_URI}/{tuning_data_filename}\"\n", - "VALIDATION_DATA_URI = f\"{BUCKET_URI}/{validation_data_filename}\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZAOu-xJnA54y" - }, - "source": [ - "### Create a supervised tuning job using Gemini\n", - "Now it's time for you to start your tuning job. You will use the `gemini-1.0-pro-002` model." - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": { - "id": "SodJv2vWicfu" - }, - "outputs": [], - "source": [ - "foundation_model = GenerativeModel(\"gemini-1.0-pro-002\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "6e7zBH5foZbC" - }, - "outputs": [], - "source": [ - "# Tune a model using `train` method.\n", - "sft_tuning_job = sft.train(\n", - " source_model=foundation_model,\n", - " train_dataset=TUNING_DATA_URI,\n", - " # Optional:\n", - " validation_dataset=VALIDATION_DATA_URI,\n", - " epochs=3,\n", - " learning_rate_multiplier=1.0,\n", - ")\n", - "\n", - "# Get the tuning job info.\n", - "sft_tuning_job.to_dict()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-LSm5Ns5gjx-" - }, - "source": [ - "Lets monitor the state. Wait for the next step to complete. Tuning a model will take some time." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "KgukIEFPlVdD" - }, - "source": [ - "Next you can retrieve the model resource name." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Q3yiKi-KofGK" - }, - "outputs": [], - "source": [ - "# Get the resource name of the tuning job\n", - "sft_tuning_job_name = sft_tuning_job.resource_name\n", - "sft_tuning_job_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SM1RZVqWKRdg" - }, - "source": [ - "Tuning takes time. Please wait until the job is finished before you continue after the next cell." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Uyug1dw4FAgn" - }, - "outputs": [], - "source": [ - "%%time\n", - "# Wait for job completion\n", - "while not sft_tuning_job.refresh().has_ended:\n", - " time.sleep(60)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "nyDS9G2TTX9p" - }, - "outputs": [], - "source": [ - "# tuned model name\n", - "tuned_model_name = sft_tuning_job.tuned_model_name\n", - "tuned_model_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "iU2AVJdLliUh" - }, - "source": [ - "And the model endpoint." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_s57xpI5o9m0" - }, - "source": [ - "You can use `tuning.TuningJob.list()` to retrieve your tuning jobs." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "8QtT3uJ3Jw0N" - }, - "outputs": [], - "source": [ - "sft_tuning_job.list()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0KQmyjjcJ9uz" - }, - "source": [ - "You model is automatically deployed as a Vertex AI Endpoint and ready for usage!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "X9uQD-Ee_h6h" - }, - "outputs": [], - "source": [ - "# tuned model endpoint name\n", - "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", - "tuned_model_endpoint_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IRPlb4ZO8ulD" - }, - "source": [ - "# Load tuned Generative Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "OhfU4wTOtH1y" - }, - "outputs": [], - "source": [ - "tuned_model = GenerativeModel(tuned_model_endpoint_name)\n", - "print(tuned_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1b1b39693d75" - }, - "source": [ - "Call the API" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "S1q1PT2zJRO9" - }, - "outputs": [], - "source": [ - "tuned_model.generate_content(\n", - " \"How do I store a TensorFlow checkpoint on Google Cloud Storage while training?\"\n", - ")" - ] - } - ], - "metadata": { - "colab": { - "name": "gemini_supervised_tuning_qa.ipynb", - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb index 0b79e28204..27c7faf1a0 100644 --- a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb +++ b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb @@ -76,6 +76,8 @@ "\n", "This notebook serves as a tool to preprocess and estimate token counts for tuning costs for tuning [`gemini-1.0-pro-002`](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning).\n", "\n", + "You can also find the code to check your dataset for Vertex AI Gemini `gemini-1.5-pro-001`.\n", + "\n", "\n", "For 
how to prepare dataset for tuning gemini, please refer to this [tutorial](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-about)." ] @@ -100,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "id": "tFy3H3aPgx12" }, @@ -124,7 +126,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "id": "XRvKdaPDTznN" }, @@ -171,7 +173,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "id": "NyKGtVQjgx13" }, @@ -200,7 +202,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "id": "Nqwi-5ufWp_B" }, @@ -235,7 +237,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "metadata": { "id": "4498u5KpQijW" }, @@ -264,7 +266,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 2, "metadata": { "id": "f0JwfuPSSofK" }, @@ -288,11 +290,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "id": "PTvunHqRTHqe" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Num training examples: 500\n", + "First example:\n", + "{'role': 'user', 'content': \"#Person1#: Hi, Mr. Smith. I'm Doctor Hawkins. Why are you here today?\\n#Person2#: I found it would be a good idea to get a check-up.\\n#Person1#: Yes, well, you haven't had one for 5 years. You should have one every year.\\n#Person2#: I know. I figure as long as there is nothing wrong, why go see the doctor?\\n#Person1#: Well, the best way to avoid serious illnesses is to find out about them early. So try to come at least once a year for your own good.\\n#Person2#: Ok.\\n#Person1#: Let me see here. Your eyes and ears look fine. Take a deep breath, please. Do you smoke, Mr. Smith?\\n#Person2#: Yes.\\n#Person1#: Smoking is the leading cause of lung cancer and heart disease, you know. 
You really should quit.\\n#Person2#: I've tried hundreds of times, but I just can't seem to kick the habit.\\n#Person1#: Well, we have classes and some medications that might help. I'll give you more information before you leave.\\n#Person2#: Ok, thanks doctor.\\n\\nProvide a summary of the article in two or three sentences:\\n\\n\"}\n", + "CountTokensResult(total_tokens=277)\n", + "{'role': 'model', 'content': \"Mr. Smith's getting a check-up, and Doctor Hawkins advises them to have one every year. Hawkins'll give some information about their classes and medications to help Mr. Smith quit smoking.\"}\n", + "CountTokensResult(total_tokens=41)\n", + "Num validation examples: 100\n" + ] + } + ], "source": [ "with tf.io.gfile.GFile(training_dataset_uri) as dataset_jsonl_file:\n", " example_training_dataset = [\n", @@ -344,7 +360,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": { "id": "1mzpB9PUVp5z" }, @@ -429,7 +445,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 11, "metadata": { "id": "pUCpEmEFM0eX" }, @@ -466,7 +482,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": { "id": "BgFmhH2XOdzu" }, @@ -526,7 +542,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": { "id": "vm0Jwzt0RDGd" }, @@ -658,7 +674,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": { "id": "uOWsUbwVXoTU" }, @@ -722,7 +738,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": { "id": "DVIpbaGYRJQc" }, @@ -757,6 +773,181 @@ " f\"By default, you'll be charged for ~{epoch_count * total_number_of_billable_tokens} tokens.\"\n", ")" ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k35OK86wJVVd" + }, + "source": [ + "## Validate the dataset for Vertex AI Gemini 1.5 fine-tuning" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { 
+ "id": "KQWJhyaXQRNM" + }, + "outputs": [], + "source": [ + "BASE_MODEL = \"gemini-1.5-pro-001\" # @param ['gemini-1.5-pro-001']{type:\"string\"}\n", + "training_dataset_uri_2 = \"gs://github-repo/generative-ai/gemini/tuning/train_sft_train_samples.jsonl\" # @param {type:\"string\"}\n", + "validation_dataset_uri_2 = \"gs://github-repo/generative-ai/gemini/tuning/val_sft_val_samples.jsonl\" # @param {type:\"string\"}\n", + "\n", + "tokenizer = get_tokenizer_for_model(BASE_MODEL)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "id": "p-soJC81YNy2" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Num training examples: 500\n", + "First example:\n", + "{'role': 'user', 'parts': [{'text': 'Honesty is usually the best policy. It is disrespectful to lie to someone. If you don\\'t want to date someone, you should say so. Sometimes it is easy to be honest. For example, you might be able to truthfully say, \"No, thank you, I already have a date for that party.\" Other times, you might need to find a kinder way to be nice. Maybe you are not attracted to the person. Instead of bluntly saying that, try saying, \"No, thank you, I just don\\'t think we would be a good fit.\" Avoid making up a phony excuse. For instance, don\\'t tell someone you will be out of town this weekend if you won\\'t be. There\\'s a chance that you might then run into them at the movies, which would definitely cause hurt feelings. A compliment sandwich is a really effective way to provide feedback. Essentially, you \"sandwich\" your negative comment between two positive things. Try using this method when you need to reject someone. An example of a compliment sandwich is to say something such as, \"You\\'re an awesome person. Unfortunately, I\\'m not interested in dating you. Someone else is going to be really lucky to date someone with such a great personality!\" You could also try, \"You are a really nice person. 
I\\'m only interested you as a friend. I like when we hang out in big groups together!\" Be sincere. If you offer false compliments, the other person will likely be able to tell and feel hurt. If you do not want to date someone, it is best to be upfront about your feelings. Do not beat around the bush. If your mind is made up, it is best to clearly state your response. If someone asks you to date them and you don\\'t want to, you can be direct and kind at the same time. State your answer clearly. You can make your feelings clear without purposefully hurting someone else\\'s feelings. Try smiling and saying, \"That sounds fun, but no thank you. I\\'m not interested in dating you.\" Don\\'t beat around the bush. If you do not want to accept the date, there is no need to say, \"Let me think about it.\" It is best to get the rejection over with. You don\\'t want to give someone false hope. Avoid saying something like, \"Let me check my schedule and get back to you.\" Try to treat the person the way you would want to be treated. This means that you should choose your words carefully. Be thoughtful in your response. It\\'s okay to pause before responding. You might be taken by surprise and need a moment to collect your thoughts. Say thank you. It is a compliment to be asked out. You can say, \"I\\'m flattered. Unfortunately, I can\\'t accept.\" Don\\'t laugh. Many people laugh nervously in awkward situations. Try to avoid giggling, as that is likely to result in hurt feelings. Sometimes it is not what you say, but how you say it. If you need to reject someone, think about factors other than your words. Non-verbal communication matters, too. Use the right tone of voice. Try to sound gentle but firm. Make eye contact. This helps convey that you are being serious, and also shows respect for the other person. If you are in public, try not to speak too loudly. 
It is not necessary for everyone around you to know that you are turning down a date.\\n\\nProvide a summary of the article in two or three sentences:\\n\\n'}]}\n", + "{'role': 'model', 'parts': [{'text': 'Tell the truth. Use a \"compliment sandwich\". Be direct. Treat the person with respect. Communicate effectively.'}]}\n", + "Num validation examples: 100\n" + ] + } + ], + "source": [ + "with tf.io.gfile.GFile(training_dataset_uri_2) as dataset_jsonl_file:\n", + " example_training_dataset = [\n", + " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", + " ]\n", + "\n", + "print()\n", + "\n", + "if validation_dataset_uri:\n", + " with tf.io.gfile.GFile(validation_dataset_uri_2) as dataset_jsonl_file:\n", + " example_validation_dataset = [\n", + " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", + " ]\n", + "\n", + "# Initial dataset stats\n", + "print(\"Num training examples:\", len(example_training_dataset))\n", + "print(\"First example:\")\n", + "for message in example_training_dataset[0][\"contents\"]:\n", + " print(message)\n", + "\n", + "if example_validation_dataset:\n", + " print(\"Num validation examples:\", len(example_validation_dataset))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "id": "K0YOqIbtXwGI" + }, + "outputs": [], + "source": [ + "def validate_dataset_format(dataset):\n", + " \"\"\"Validates the dataset.\n", + "\n", + " Args:\n", + " dataset_uri: The dataset uri to be validated.\n", + " \"\"\"\n", + " format_errors = defaultdict(list)\n", + " if not dataset or len(dataset) == 0:\n", + " print(\"Input dataset file is empty or inaccessible.\")\n", + " return\n", + "\n", + " for row_idx, example in enumerate(dataset):\n", + " # Verify presence of contents list\n", + " if not isinstance(example, dict):\n", + " format_errors[\"invalid_input\"].append(row_idx)\n", + " continue\n", + " contents = example.get(\"contents\", None)\n", + " if not contents:\n", + " 
format_errors[\"missing_contents_list\"].append(row_idx)\n", + " continue\n", + " try:\n", + " validate_contents(contents, format_errors, row_idx)\n", + " except (TypeError, AttributeError, KeyError) as e:\n", + " print(\"Invalid input during validation: %s\", e)\n", + " format_errors[\"invalid_input\"].append(row_idx)\n", + "\n", + " if format_errors:\n", + " print(\"Found errors for this dataset:\")\n", + " for k, v in format_errors.items():\n", + " print(f\"{k}: {v}\")\n", + " else:\n", + " print(\"No errors found for this dataset.\")\n", + "\n", + "\n", + "def validate_contents(contents, format_errors, row_index):\n", + " \"\"\"Validates contents list format.\"\"\"\n", + " if not isinstance(contents, list):\n", + " format_errors[\"invalid_contents_list\"].append(row_index)\n", + " return\n", + "\n", + " prev_role = None\n", + " for content_item in contents:\n", + " if not isinstance(content_item, dict):\n", + " format_errors[\"invalid_content_item\"].append(row_index)\n", + " return\n", + "\n", + " if \"role\" not in content_item or \"parts\" not in content_item:\n", + " format_errors[\"content_item_missing_key\"].append(row_index)\n", + " return\n", + "\n", + " if content_item.get(\"role\", \"\").lower() not in (\"user\", \"model\"):\n", + " format_errors[\"unrecognized_role\"].append(row_index)\n", + " return\n", + "\n", + " parts = content_item.get(\"parts\", None)\n", + " if not parts or not isinstance(parts, list):\n", + " format_errors[\"missing_or_invalid_parts\"].append(row_index)\n", + " return\n", + "\n", + " for part in parts:\n", + " if not isinstance(part, dict) or \"text\" not in part:\n", + " format_errors[\"invalid_part\"].append(row_index)\n", + " return\n", + "\n", + " if not part.get(\"text\"):\n", + " format_errors[\"missing_text\"].append(row_index)\n", + " return\n", + "\n", + " role = content_item.get(\"role\", \"\").lower()\n", + " # messages to have alternate turns.\n", + " if role == prev_role:\n", + " 
format_errors[\"consecutive_turns\"].append(row_index)\n", + " return\n", + "\n", + " prev_role = role" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "id": "RppMRP9lIkq2" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No errors found for this dataset.\n", + "No errors found for this dataset.\n" + ] + } + ], + "source": [ + "validate_dataset_format(example_training_dataset)\n", + "if example_validation_dataset:\n", + " validate_dataset_format(example_validation_dataset)" + ] } ], "metadata": { From 0af924323208d59df6a64e08d0005c7bc345692a Mon Sep 17 00:00:00 2001 From: Kristopher Overholt Date: Tue, 24 Sep 2024 11:08:04 -0500 Subject: [PATCH 09/76] feat: Add sample notebook for multimodal function calling in Gemini (#1162) # Description This PR adds a sample notebook that uses Gemini multimodal function calling to process images, PDFs, audio, an video inputs. - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. - [X] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. - [X] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [X] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> --- .github/actions/spelling/allow.txt | 6 + .../multimodal_function_calling.ipynb | 1556 +++++++++++++++++ 2 files changed, 1562 insertions(+) create mode 100644 gemini/function-calling/multimodal_function_calling.ipynb diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 199219b230..3c4bb080ba 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -1,5 +1,6 @@ AFX AIP +AMNOSH ANZ APIENTRY APSTUDIO @@ -13,6 +14,7 @@ Ashish Aspeed Autechre Autorater +BIKBEAR BITCODE Benno Bettes @@ -27,6 +29,8 @@ CHECKOV COCOAPODS COINIT CONOUT +CUCUONAR +CWLEY CZE Caprese Codelab @@ -130,6 +134,7 @@ LRESULT LSTATUS LSum LTRB +LUVBPTK Ladhak Lego Llion @@ -298,6 +303,7 @@ bitcoin bpa bqml carbonara +caudatus caxis cfbundle chatbots diff --git a/gemini/function-calling/multimodal_function_calling.ipynb b/gemini/function-calling/multimodal_function_calling.ipynb new file mode 100644 index 0000000000..0c7759da95 --- /dev/null +++ b/gemini/function-calling/multimodal_function_calling.ipynb @@ -0,0 +1,1556 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ijGzTHJJUCPY" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing 
permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VEqbX8OhE8y9" + }, + "source": [ + "# Multimodal Function Calling with the Gemini API & Python SDK\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Run in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Run in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84e7e432e6ff" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "|Author(s) | [Kristopher Overholt](https://github.com/koverholt) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CkHPv2myT2cx" + }, + "source": [ + "## Overview\n", + "\n", + "### Introduction to Multimodal Function Calling with Gemini\n", + "\n", + "This notebook demonstrates a powerful [Function Calling](https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/function-calling) capability of the Gemini model: support for multimodal inputs. With multimodal function calling, you can go beyond traditional text inputs, enabling Gemini to understand your intent and predict function calls and function parameters based on various inputs like images, audio, video, and PDFs. Function calling can also be referred to as *function calling with controlled generation*, which guarantees that output generated by the model always adheres to a specific schema so that you receive consistently formatted responses.\n", + "\n", + "You can To specify the structure of a model's output, define a response schema, which works like a blueprint for model responses. When you submit a prompt and include the response schema, the model's response always follows your defined schema.\n", + "\n", + "Previously, implementing multimodal function calling required two separate calls to the Gemini API: one to extract information from media, and another to generate a function call based on the extracted text. This process was cumbersome, prone to errors, and resulted in the loss of detail in valuable contextual information. Gemini's multimodal function calling capability streamlines this workflow, enabling a single API call that efficiently processes multimodal inputs for accurate function predictions and structured outputs. \n", + "\n", + "### How It Works\n", + "\n", + "1. 
**Define Functions and Tools:** Describe your functions, then group them into `Tool` objects for Gemini to use.\n", + "2. **Send Inputs and Prompt:** Provide Gemini with multimodal input (image, audio, PDF, etc.) and a prompt describing your request.\n", + "3. **Gemini Predicts Action:** Gemini analyzes the multimodal input and prompt to predict the best function to call and its parameters.\n", + "4. **Execute and Return:** Use Gemini's prediction to make API calls, then send the results back to Gemini.\n", + "5. **Generate Response:** Gemini uses the API results to provide a final, natural language response to the user. \n", + "\n", + "This notebook will guide you through practical examples of using Gemini's multimodal function calling to build intelligent applications that go beyond the limitations of text-only interactions. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DrkcqHrrwMAo" + }, + "source": [ + "### Objectives\n", + "\n", + "In this tutorial, you will learn how to use the Vertex AI Gemini API with the Vertex AI SDK for Python to make function calls with multimodal inputs, using the Gemini 1.5 Pro (`gemini-1.5-pro`) model. You'll explore how Gemini can process and understand various input types — including images, video, audio, and PDFs — to predict and execute functions.\n", + "\n", + "You will complete the following tasks:\n", + "\n", + "- Install the Vertex AI SDK for Python.\n", + "- Define functions that can be called by Gemini.\n", + "- Package functions into tools.\n", + "- Send multimodal inputs (images, video, audio, PDFs) and prompts to Gemini.\n", + "- Extract predicted function calls and their parameters from Gemini's response.\n", + "- Use the predicted output to make API calls to external systems (demonstrated with an image input example). \n", + "- Return API responses to Gemini for natural language response generation (demonstrated with an image input example). 
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "C9nEPojogw-g" + }, + "source": [ + "### Costs\n", + "\n", + "This tutorial uses billable components of Google Cloud:\n", + "\n", + "- Vertex AI\n", + "\n", + "Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "r11Gu7qNgx1p" + }, + "source": [ + "## Getting Started\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK for Python\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "# !pip3 install --upgrade --user --quiet google-cloud-aiplatform wikipedia" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart current runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which will restart the current kernel." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "# # Restart kernel after installs so that your environment can access the new packages\n", + "# import IPython\n", + "\n", + "# app = IPython.Application.instance()\n", + "# app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Please wait until it is finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you are running this notebook on Google Colab, run the following cell to authenticate your environment. This step is not required if you are using [Vertex AI Workbench](https://cloud.google.com/vertex-ai-workbench)." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", + "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", + "\n", + "import vertexai\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2a7225e4390a" + }, + "source": [ + "## Multimodal Function Calling in Action" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jXHfaVS66_01" + }, + "source": [ + "### Import libraries\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "lslYAvw37JGQ" + }, + "outputs": [], + "source": [ + "from IPython.display import Markdown, display\n", + "from vertexai.generative_models import (\n", + " Content,\n", + " FunctionDeclaration,\n", + " GenerationConfig,\n", + " GenerativeModel,\n", + " Part,\n", + " Tool,\n", + ")\n", + "import wikipedia" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aa432a6e021a" + }, + "source": [ + "### Image-Based Function Calling: Finding Animal Habitats\n", + "\n", + "In this example, you'll send along an image of a bird and ask Gemini to identify its habitat. This involves defining a function that looks up regions where a given animal is found, creating a tool that uses this function, and then sending a request to Gemini.\n", + "\n", + "First, you define a `FunctionDeclaration` called `get_wildlife_region`. This function takes the name of an animal species as input and returns information about its typical region." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "ae36049d4512" + }, + "outputs": [], + "source": [ + "get_wildlife_region = FunctionDeclaration(\n", + " name=\"get_wildlife_region\",\n", + " description=\"Look up the region where an animal can be found\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"animal\": {\"type\": \"string\", \"description\": \"Species of animal\"}\n", + " },\n", + " },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0933d807af15" + }, + "source": [ + "Next, you create a `Tool` object that includes your `get_wildlife_region` function. Tools help group related functions that Gemini can use:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "c4218e572dc8" + }, + "outputs": [], + "source": [ + "image_tool = Tool(\n", + " function_declarations=[\n", + " get_wildlife_region,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "28abd9546c48" + }, + "source": [ + "Now you're ready to send a request to Gemini. Initialize the `GenerativeModel` and specify the image to analyze, along with a prompt. The `tools` argument tells Gemini to consider the functions in your `image_tool`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "38b842d71bce" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "name: \"get_wildlife_region\"\n", + "args {\n", + " fields {\n", + " key: \"animal\"\n", + " value {\n", + " string_value: \"Lilac-breasted Roller\"\n", + " }\n", + " }\n", + "}" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = GenerativeModel(\"gemini-1.5-pro-001\")\n", + "generation_config = GenerationConfig(temperature=0)\n", + "\n", + "response = model.generate_content(\n", + " [\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/multi-color-bird.jpg\",\n", + " mime_type=\"image/jpeg\",\n", + " ),\n", + " \"What is the typical habitat or region where this animal lives?\",\n", + " ],\n", + " generation_config=generation_config,\n", + " tools=[image_tool],\n", + ")\n", + "response_function_call = response.candidates[0].content\n", + "response.candidates[0].function_calls[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "065787dbaa26" + }, + "source": [ + "Let's examine the response from Gemini. 
You can extract the predicted function name:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "e2b92a75e5b9" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'get_wildlife_region'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_name = response.candidates[0].function_calls[0].name\n", + "function_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4a6ba2cf6937" + }, + "source": [ + "You can also get the arguments that Gemini predicted for the function call:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "c89f16d5082e" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'animal': 'Lilac-breasted Roller'}" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_args = {\n", + " key: value for key, value in response.candidates[0].function_calls[0].args.items()\n", + "}\n", + "function_args" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "180ef53579a0" + }, + "source": [ + "Now, you'll call an external API (in this case, using the `wikipedia` Python package) using the animal name that Gemini extracted from the image:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "07eed3ae7aa3" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'The lilac-breasted roller (Coracias caudatus) is an African bird of the roller family, Coraciidae. It is widely distributed in Southern and Eastern Africa, and is a vagrant to the southern Arabian Peninsula. It prefers open woodland and savanna, and it is for the most part absent from treeless places. Usually found alone or in pairs, it perches at the tops of trees, poles or other high vantage points from where it can spot insects, amphibians and small birds moving about on the ground. 
Nesting t'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "api_response = wikipedia.page(function_args[\"animal\"]).content\n", + "api_response[:500]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f238cad25c36" + }, + "source": [ + "Finally, you return the API response to Gemini so it can generate a final answer in natural language:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "02ee532ce187" + }, + "outputs": [ + { + "data": { + "text/markdown": [ + "The lilac-breasted roller (Coracias caudatus) is an African bird. It is widely distributed in Southern and Eastern Africa. \n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "response = model.generate_content(\n", + " [\n", + " Content(\n", + " role=\"user\",\n", + " parts=[\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/multi-color-bird.jpg\",\n", + " mime_type=\"image/jpeg\",\n", + " ),\n", + " Part.from_text(\n", + " \"Inspect the image and get the regions where this animal can be found\",\n", + " ),\n", + " ],\n", + " ),\n", + " response_function_call, # Function call response\n", + " Content(\n", + " parts=[\n", + " Part.from_function_response(\n", + " name=function_name,\n", + " response={\n", + " \"content\": api_response, # Return the API response to the Gemini model\n", + " },\n", + " )\n", + " ],\n", + " ),\n", + " ],\n", + " tools=[image_tool],\n", + ")\n", + "\n", + "display(Markdown(response.text))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6e5f13d0e644" + }, + "source": [ + "This example showcases how Gemini's multimodal function calling processes an image, predicts a relevant function and its parameters, and integrates with external APIs to provide comprehensive user information. 
This process opens up exciting possibilities for building intelligent applications that can \"see\" and understand the world around them via API calls to Gemini." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6036faa1fb70" + }, + "source": [ + "### Video-Based Function Calling: Identifying Product Features" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4dd489a96132" + }, + "source": [ + "Now let's explore how Gemini can extract information from videos for the purpose of invoking a function call. You'll use a video showcasing multiple products and ask Gemini to identify its key features.\n", + "\n", + "Start by defining a function called `get_feature_info` that takes a list of product features as input and could potentially be used to retrieve additional details about those features:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "41d1ed66b8b3" + }, + "outputs": [], + "source": [ + "get_feature_info = FunctionDeclaration(\n", + " name=\"get_feature_info\",\n", + " description=\"Get additional information about a product feature\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"features\": {\n", + " \"type\": \"array\",\n", + " \"description\": \"A list of product features\",\n", + " \"items\": {\"type\": \"string\", \"description\": \"Product feature\"},\n", + " }\n", + " },\n", + " },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b972769f37e6" + }, + "source": [ + "Next, create a tool that includes your `get_feature_info` function:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "id": "f134fc04e6bb" + }, + "outputs": [], + "source": [ + "video_tool = Tool(\n", + " function_declarations=[\n", + " get_feature_info,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c16c497b85d3" + }, + "source": [ + "Send a video to Gemini, along with a prompt asking for 
information about the product features, making sure to include your `video_tool` in the `tools` kwarg:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "id": "09fbe282c3d3" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"get_feature_info\"\n", + " args {\n", + " fields {\n", + " key: \"features\"\n", + " value {\n", + " list_value {\n", + " values {\n", + " string_value: \"Gemini Live\"\n", + " }\n", + " values {\n", + " string_value: \"Made You Look\"\n", + " }\n", + " values {\n", + " string_value: \"Add me\"\n", + " }\n", + " values {\n", + " string_value: \"Magic Editor\"\n", + " }\n", + " values {\n", + " string_value: \"Pixel 9 Pro Fold\"\n", + " }\n", + " values {\n", + " string_value: \"Pixel Buds Pro 2\"\n", + " }\n", + " values {\n", + " string_value: \"Call Notes\"\n", + " }\n", + " values {\n", + " string_value: \"Pixel Screenshots\"\n", + " }\n", + " values {\n", + " string_value: \"Pixel Studio\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = GenerativeModel(\"gemini-1.5-pro-001\")\n", + "generation_config = GenerationConfig(temperature=0)\n", + "\n", + "response = model.generate_content(\n", + " [\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/made-by-google-24.mp4\",\n", + " mime_type=\"video/mp4\",\n", + " ),\n", + " \"Inspect the video and get information about the product features shown\",\n", + " ],\n", + " generation_config=generation_config,\n", + " tools=[video_tool],\n", + ")\n", + "response.candidates[0].function_calls" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4115fd61850b" + }, + "source": [ + "Gemini correctly predicted the `get_feature_info` function:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "1ae3bb7a4847" + }, + "outputs": [ + { + "data": { + "text/plain": [ 
+ "'get_feature_info'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_name = response.candidates[0].function_calls[0].name\n", + "function_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c17668290dd0" + }, + "source": [ + "And you can see the list of product features that Gemini extracted from the video, which are available as structured function arguments that adhere to the JSON schema we defined in the `FunctionDeclaration`:" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "id": "992c59809c7b" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'features': ['Gemini Live', 'Made You Look', 'Add me', 'Magic Editor', 'Pixel 9 Pro Fold', 'Pixel Buds Pro 2', 'Call Notes', 'Pixel Screenshots', 'Pixel Studio']}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_args = {\n", + " key: value for key, value in response.candidates[0].function_calls[0].args.items()\n", + "}\n", + "function_args" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bd8dd10f1219" + }, + "source": [ + "This example demonstrates Gemini's ability to understand video content. By defining a relevant function, you can use Gemini to extract structured information from videos and perform further actions based on that information.\n", + "\n", + "Now that the multimodal function call response is complete, you could use the function name and function arguments to call an external API using any REST API or client library of your choice, similar to how we did in the previous example with the `wikipedia` Python package.\n", + "\n", + "Since this sample notebook is focused on the mechanics of multimodal function calling rather than the subsequent function calls and API calls, we'll move on to another example with different multimodal inputs. 
You can refer to other sample notebooks on Gemini Function Calling for more details on where to go from here." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "41a08eda3be8" + }, + "source": [ + "### Audio-Based Function Calling: Generating Book Recommendations" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a94cd078b43c" + }, + "source": [ + "In this example, you'll explore using audio input with Gemini's multimodal function calling. You'll send a podcast episode to Gemini and ask for book recommendations related to the topics discussed.\n", + "\n", + "Define a function called `get_recommended_books` that takes a list of topics as input and (hypothetically) returns relevant book recommendations:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "id": "9714025043bd" + }, + "outputs": [], + "source": [ + "get_recommended_books = FunctionDeclaration(\n", + " name=\"get_recommended_books\",\n", + " description=\"Get recommended books based on a list of topics\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"topics\": {\n", + " \"type\": \"array\",\n", + " \"description\": \"A list of topics\",\n", + " \"items\": {\"type\": \"string\", \"description\": \"Topic\"},\n", + " },\n", + " },\n", + " },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f23465d0938f" + }, + "source": [ + "Now create a tool that includes your newly defined function:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "id": "d61600788e03" + }, + "outputs": [], + "source": [ + "audio_tool = Tool(\n", + " function_declarations=[\n", + " get_recommended_books,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "560afbd15a17" + }, + "source": [ + "Provide Gemini with the audio file and a prompt to recommend books based on the podcast content:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + 
"metadata": { + "id": "47228e6631a3" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"get_recommended_books\"\n", + " args {\n", + " fields {\n", + " key: \"topics\"\n", + " value {\n", + " list_value {\n", + " values {\n", + " string_value: \"Software Reliability\"\n", + " }\n", + " values {\n", + " string_value: \"Site Reliability Engineering\"\n", + " }\n", + " values {\n", + " string_value: \"DevOps\"\n", + " }\n", + " values {\n", + " string_value: \"Cloud Computing\"\n", + " }\n", + " values {\n", + " string_value: \"System Thinking\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }]" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = GenerativeModel(\"gemini-1.5-pro-001\")\n", + "generation_config = GenerationConfig(temperature=0)\n", + "\n", + "response = model.generate_content(\n", + " [\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/google-cloud-sre-podcast-s2-e8.mp3\",\n", + " mime_type=\"audio/mpeg\",\n", + " ),\n", + " \"Inspect the audio file and generate a list of recommended books based on the topics discussed\",\n", + " ],\n", + " generation_config=generation_config,\n", + " tools=[audio_tool],\n", + ")\n", + "response.candidates[0].function_calls" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "db9b85de9752" + }, + "source": [ + "You can see that Gemini has successfully predicted your `get_recommended_books` function:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "id": "eabef4d9faf4" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'get_recommended_books'" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_name = response.candidates[0].function_calls[0].name\n", + "function_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ea00f52eb487" + }, + "source": [ + "And the function 
arguments contain the list of topics that Gemini identified and extracted from the input audio file:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "id": "8c8f32e930c9" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'topics': ['Software Reliability', 'Site Reliability Engineering', 'DevOps', 'Cloud Computing', 'System Thinking']}" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_args = {\n", + " key: value for key, value in response.candidates[0].function_calls[0].args.items()\n", + "}\n", + "function_args" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8acd15dd7cec" + }, + "source": [ + "This example highlights Gemini's capacity to understand and extract information from audio, enabling you to create applications that respond to spoken content or audio-based interactions." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "93577f2d2fe1" + }, + "source": [ + "### PDF-Based Function Calling: Extracting Company Data from Invoices" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "924cf8d1c711" + }, + "source": [ + "This example demonstrates how to use Gemini's multimodal function calling to process PDF documents. 
You'll work with a set of invoices and extract the names of the (fictitious) companies involved.\n", + "\n", + "Define a function called `get_company_information` that (in a real-world scenario) could be used to fetch details about a given list of companies:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "id": "ba57e626e9d2" + }, + "outputs": [], + "source": [ + "get_company_information = FunctionDeclaration(\n", + " name=\"get_company_information\",\n", + " description=\"Get information about a list of companies\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"companies\": {\n", + " \"type\": \"array\",\n", + " \"description\": \"A list of companies\",\n", + " \"items\": {\"type\": \"string\", \"description\": \"Company name\"},\n", + " }\n", + " },\n", + " },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fae1c7a7d8a9" + }, + "source": [ + "Package your newly defined function into a tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "id": "a62ed01019f0" + }, + "outputs": [], + "source": [ + "invoice_tool = Tool(\n", + " function_declarations=[\n", + " get_company_information,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "91dcbcbb0f50" + }, + "source": [ + "Now you can provide Gemini with multiple PDF invoices and ask it to get company information:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "id": "e509abf4d73a" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"get_company_information\"\n", + " args {\n", + " fields {\n", + " key: \"companies\"\n", + " value {\n", + " list_value {\n", + " values {\n", + " string_value: \"AMNOSH\\\\nSUPPLIERS\"\n", + " }\n", + " values {\n", + " string_value: \"BIKBEAR\\\\nLAW FIRM\"\n", + " }\n", + " values {\n", + " string_value: \"CUCUONAR\\\\nAGENCY\"\n", + " }\n", + " values {\n", + " string_value: 
\"CWLEY PLC\"\n", + " }\n", + " values {\n", + " string_value: \"LUVBPTK Company\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }]" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = GenerativeModel(\"gemini-1.5-pro-001\")\n", + "generation_config = GenerationConfig(temperature=0)\n", + "\n", + "response = model.generate_content(\n", + " [\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/invoice-synthetic-1.pdf\",\n", + " mime_type=\"application/pdf\",\n", + " ),\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/invoice-synthetic-2.pdf\",\n", + " mime_type=\"application/pdf\",\n", + " ),\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/invoice-synthetic-3.pdf\",\n", + " mime_type=\"application/pdf\",\n", + " ),\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/invoice-synthetic-4.pdf\",\n", + " mime_type=\"application/pdf\",\n", + " ),\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/invoice-synthetic-5.pdf\",\n", + " mime_type=\"application/pdf\",\n", + " ),\n", + " \"Inspect the PDF files of invoices and retrieve information about each company\",\n", + " ],\n", + " generation_config=generation_config,\n", + " tools=[invoice_tool],\n", + ")\n", + "response.candidates[0].function_calls" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "974a138f3c6c" + }, + "source": [ + "As expected, Gemini predicted the `get_company_information` function:" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "id": "766fdbafed76" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'get_company_information'" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_name = 
response.candidates[0].function_calls[0].name\n", + "function_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c80e9f280f5c" + }, + "source": [ + "The function arguments contain the list of company names extracted from the PDF invoices:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "id": "9fa7a22d85b2" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'companies': ['AMNOSH\\\\nSUPPLIERS', 'BIKBEAR\\\\nLAW FIRM', 'CUCUONAR\\\\nAGENCY', 'CWLEY PLC', 'LUVBPTK Company']}" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "function_args = {\n", + " key: value for key, value in response.candidates[0].function_calls[0].args.items()\n", + "}\n", + "function_args" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "03e710b9d195" + }, + "source": [ + "This example shows the power of Gemini for processing and extracting structured data from documents, a common requirement in many real-world applications." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "77f53d886376" + }, + "source": [ + "### Image-Based Chat: Building a Multimodal Chatbot" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d145dc63a74a" + }, + "source": [ + "Let's put it all together and build a simple multimodal chatbot. This chatbot will understand image inputs and respond to questions using the functions you define.\n", + "\n", + "First, define three functions: `get_animal_details`, `get_location_details`, and `check_color_palette`. 
These functions represent the capabilities of your chatbot and could potentially be used to retrieve additional details using REST API calls:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "id": "1618b25ad3e0" + }, + "outputs": [], + "source": [ + "get_animal_details = FunctionDeclaration(\n", + " name=\"get_animal_details\",\n", + " description=\"Look up information about a given animal species\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"animal\": {\"type\": \"string\", \"description\": \"Species of animal\"}\n", + " },\n", + " },\n", + ")\n", + "\n", + "get_location_details = FunctionDeclaration(\n", + " name=\"get_location_details\",\n", + " description=\"Look up information about a given location\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\"location\": {\"type\": \"string\", \"description\": \"Location\"}},\n", + " },\n", + ")\n", + "\n", + "check_color_palette = FunctionDeclaration(\n", + " name=\"check_color_palette\",\n", + " description=\"Check hex color codes for accessibility\",\n", + " parameters={\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"colors\": {\n", + " \"type\": \"array\",\n", + " \"description\": \"A list of colors in hexadecimal format\",\n", + " \"items\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"Hexadecimal representation of color, as in #355E3B\",\n", + " },\n", + " }\n", + " },\n", + " },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ca63d74adeba" + }, + "source": [ + "Group your functions into a tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "id": "178ce7754626" + }, + "outputs": [], + "source": [ + "chat_tool = Tool(\n", + " function_declarations=[\n", + " get_animal_details,\n", + " get_location_details,\n", + " check_color_palette,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"0eb1e7629b9e" + }, + "source": [ + "Initialize the `GenerativeModel` and start a chat session with Gemini, providing it with your `chat_tool`:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "id": "ac1ebff348c9" + }, + "outputs": [], + "source": [ + "model = GenerativeModel(\n", + " \"gemini-1.5-pro-001\",\n", + " generation_config=GenerationConfig(temperature=0),\n", + " tools=[chat_tool],\n", + ")\n", + "\n", + "chat = model.start_chat()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "05bb7db4be62" + }, + "source": [ + "Send an image of a fox, along with a simple prompt:" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "id": "c3e47a96df7e" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"get_animal_details\"\n", + " args {\n", + " fields {\n", + " key: \"animal\"\n", + " value {\n", + " string_value: \"fox\"\n", + " }\n", + " }\n", + " }]" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = chat.send_message(\n", + " [\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/baby-fox.jpg\",\n", + " mime_type=\"image/jpeg\",\n", + " ),\n", + " \"Tell me about this animal\",\n", + " ]\n", + ")\n", + "\n", + "response.candidates[0].function_calls" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "38c96a599b94" + }, + "source": [ + "Now ask about the location details in the image:" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "id": "153f7b93eb65" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"get_location_details\"\n", + " args {\n", + " fields {\n", + " key: \"location\"\n", + " value {\n", + " string_value: \"meadow\"\n", + " }\n", + " }\n", + " }]" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = chat.send_message(\n", + " 
[\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/baby-fox.jpg\",\n", + " mime_type=\"image/jpeg\",\n", + " ),\n", + " \"Tell me details about this location\",\n", + " ]\n", + ")\n", + "\n", + "response.candidates[0].function_calls" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "db6363659da8" + }, + "source": [ + "And finally, ask for a color palette based the image:" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "id": "af519b9c7bc5" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"check_color_palette\"\n", + " args {\n", + " fields {\n", + " key: \"colors\"\n", + " value {\n", + " list_value {\n", + " values {\n", + " string_value: \"#A85C4C\"\n", + " }\n", + " values {\n", + " string_value: \"#4C8A6C\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }]" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = chat.send_message(\n", + " [\n", + " Part.from_uri(\n", + " \"gs://github-repo/generative-ai/gemini/function-calling/baby-fox.jpg\",\n", + " mime_type=\"image/jpeg\",\n", + " ),\n", + " \"Get the color palette of this image and check it for accessibility\",\n", + " ]\n", + ")\n", + "\n", + "response.candidates[0].function_calls" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4e38eb5eeb5b" + }, + "source": [ + "While this chatbot doesn't actually execute the predicted functions, it demonstrates creating an interactive experience using multimodal inputs and function calling in a chat format. You can extend this example by implementing REST API calls or client library requests for each function to create a truly functional and engaging multimodal chatbot that's connected to the real world." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ee5711d51ae0" + }, + "source": [ + "## Conclusions\n", + "\n", + "In this notebook, you explored the powerful capabilities of Gemini's multimodal function calling. You learned how to:\n", + "\n", + "- Define functions and package them into tools.\n", + "- Send multimodal inputs (images, video, audio, PDFs) and prompts to Gemini. \n", + "- Extract predicted function calls and their parameters.\n", + "- Use the predicted output to make (or potentially make) API calls.\n", + "- Return API responses to Gemini for natural language generation. \n", + "\n", + "You've seen how Gemini can understand and act on a range of different multimodal inputs, which opens up a world of possibilities for building innovative and engaging multimodal applications. You can now use these powerful tools to create your own intelligent applications that seamlessly integrate media, natural language, and calls to external APIs and system.\n", + "\n", + "Experiment with different modalities, functions, and prompts to discover the full potential of Gemini's multimodal and function calling capabilities. And you can continue learning by exploring other sample notebooks in this repository and exploring the [documentation for Gemini Function Calling](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling). " + ] + } + ], + "metadata": { + "colab": { + "name": "multimodal_function_calling.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 193ca27e2de3428e6da6ab4d7b5fee47fac3191e Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Tue, 24 Sep 2024 14:13:23 -0400 Subject: [PATCH 10/76] refactor: Update long context window information (#1169) # Description Update long context window information for Gemini 1.5. 
Gemin 1.5 Flash 1M Gemini 1.5 Pro 2M --- gemini/long-context/intro_long_context.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gemini/long-context/intro_long_context.ipynb b/gemini/long-context/intro_long_context.ipynb index 8221041912..e93f835a9a 100644 --- a/gemini/long-context/intro_long_context.ipynb +++ b/gemini/long-context/intro_long_context.ipynb @@ -74,7 +74,7 @@ "source": [ "## Overview\n", "\n", - "Historically, large language models (LLMs) were significantly limited by the amount of text (or tokens) that could be passed to the model at one time. Gemini 1.5 Flash and Gemini 1.5 Pro support a 1 million token context window, with [near-perfect retrieval (>99%)](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf), which unlocks many new use cases and developer paradigms.\n", + "Gemini 1.5 Flash comes standard with a 1 million token context window, and Gemini 1.5 Pro comes with a 2 million token context window. Historically, large language models (LLMs) were significantly limited by the amount of text (or tokens) that could be passed to the model at one time. The Gemini 1.5 long context window, with [near-perfect retrieval (>99%)](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf), unlocks many new use cases and developer paradigms.\n", "\n", "In practice, 1 million tokens would look like:\n", "\n", From 8ebaece186d189afdd47fd0dfda0be7b56f13fba Mon Sep 17 00:00:00 2001 From: Deepak moonat Date: Wed, 25 Sep 2024 08:19:56 +0530 Subject: [PATCH 11/76] update: model to 002 version (#1170) # Description Supervised finetuning using gemini-1.5-pro-002 - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). 
- [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .../supervised_finetuning_using_gemini.ipynb | 111 ++++++++---------- 1 file changed, 48 insertions(+), 63 deletions(-) diff --git a/gemini/tuning/supervised_finetuning_using_gemini.ipynb b/gemini/tuning/supervised_finetuning_using_gemini.ipynb index 3a5bbbd169..beecd15313 100644 --- a/gemini/tuning/supervised_finetuning_using_gemini.ipynb +++ b/gemini/tuning/supervised_finetuning_using_gemini.ipynb @@ -97,6 +97,10 @@ "| Chat | 1000+ | 2-4 |\n", "\n", "\n", + "
\n", + "\n", + "Refer to public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning) for more details.\n", + "\n", "\n", "
\n", "\n", @@ -117,7 +121,7 @@ "source": [ "### Objective\n", "\n", - "In this tutorial, you will learn how to use `Vertex AI` to tune a Parameter Efficient FineTuned(PEFT) `gemini-1.5-pro` model.\n", + "In this tutorial, you will learn how to use `Vertex AI` to tune a `gemini-1.5-pro` model.\n", "\n", "\n", "This tutorial uses the following Google Cloud ML services:\n", @@ -128,7 +132,7 @@ "The steps performed include:\n", "\n", "- Prepare and load the dataset\n", - "- Load the `gemini-1.5-pro-001` model\n", + "- Load the `gemini-1.5-pro-002` model\n", "- Evaluate the model before tuning\n", "- Tune the model.\n", " - This will automatically create a Vertex AI endpoint and deploy the model to it\n", @@ -384,7 +388,7 @@ "id": "JUEloBlsCPFr" }, "source": [ - "## Step3: Create Dataset in required format\n", + "## Step3: Create Dataset in correct format\n", "\n", "The dataset used to tune a foundation model needs to include examples that align with the task that you want the model to perform. Structure your training dataset in a text-to-text format. Each record, or row, in the dataset contains the input text (also referred to as the prompt) which is paired with its expected output from the model. Supervised tuning uses the dataset to teach the model to mimic a behavior, or task, you need by giving it hundreds of examples that illustrate that behavior.\n", "\n", @@ -417,7 +421,10 @@ " # ... repeat \"user\", \"model\" for multi turns.\n", " ]\n", "}\n", - "```" + "```\n", + "\n", + "\n", + "Refer to the public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-prepare#about-datasets) for more details." 
] }, { @@ -489,7 +496,7 @@ }, "source": [ "- Data used in this notebook is present in the public Google Cloud Storage(GCS) bucket.\n", - "- It's in Gemini 1.0 dataset format for finetuning" + "- It's in Gemini 1.0 finetuning dataset format" ] }, { @@ -503,15 +510,6 @@ "!gsutil ls gs://github-repo/generative-ai/gemini/tuning/summarization/wikilingua" ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "tKupp3g6W1XS" - }, - "source": [ - "#### Download the dataset" - ] - }, { "cell_type": "code", "execution_count": null, @@ -734,7 +732,7 @@ "source": [ "The following Gemini text models support supervised tuning:\n", "\n", - "* `gemini-1.5-pro-001`" + "* `gemini-1.5-pro-002`" ] }, { @@ -745,7 +743,7 @@ }, "outputs": [], "source": [ - "base_model = \"gemini-1.5-pro-001\"\n", + "base_model = \"gemini-1.5-pro-002\"\n", "generation_model = GenerativeModel(base_model)" ] }, @@ -894,6 +892,7 @@ "| Text Generation | BLEU, ROUGE-L |\n", "\n", "\n", + "
\n", "\n", "Refer to this [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluate-models) for metric based evaluation." ] @@ -1043,6 +1042,17 @@ "corpus_batch = corpus[:100]" ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "oM10zigp7kTZ" + }, + "source": [ + "
\n", + "⚠️ It will take ~5 mins for the evaluation run on the provided batch. ⚠️\n", + "
" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1127,8 +1137,6 @@ "outputs": [], "source": [ "tuned_model_display_name = \"\" # @param {type:\"string\"}\n", - "epochs = 1 # @param {type:\"integer\"}\n", - "learning_rate_multiplier = 1\n", "\n", "# Tune a model using `train` method.\n", "sft_tuning_job = sft.train(\n", @@ -1136,8 +1144,6 @@ " train_dataset=f\"{BUCKET_URI}/train/sft_train_samples.jsonl\",\n", " # Optional:\n", " validation_dataset=f\"{BUCKET_URI}/val/sft_val_samples.jsonl\",\n", - " epochs=epochs,\n", - " learning_rate_multiplier=learning_rate_multiplier,\n", " tuned_model_display_name=tuned_model_display_name,\n", ")" ] @@ -1183,7 +1189,7 @@ }, "source": [ "
\n", - "⚠️ It will take around an hour for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", + "⚠️ It will take ~30 mins for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", "
" ] }, @@ -1296,7 +1302,7 @@ "outputs": [], "source": [ "# Read data from Tensorboard\n", - "tensorboard_run_name = f\"{experiment.get_backing_tensorboard_resource().resource_name}/experiments/{experiment.name}/runs/{experiment_run.name}\"\n", + "tensorboard_run_name = f\"{experiment.get_backing_tensorboard_resource().resource_name}/experiments/{experiment.name}/runs/{experiment_run.name.replace(experiment.name, '')[1:]}\"\n", "tensorboard_run = aiplatform.TensorboardRun(tensorboard_run_name)\n", "metrics = tensorboard_run.read_time_series_data()" ] @@ -1438,7 +1444,7 @@ { "cell_type": "markdown", "metadata": { - "id": "QB2Pnpp6dYwh" + "id": "ef7acd61d12d" }, "source": [ "```\n", @@ -1446,58 +1452,31 @@ " content {\n", " role: \"model\"\n", " parts {\n", - " text: \"Squeeze lotion onto your forearms. Place your forearms against your back. Rub your forearms up and down your back.\\n\\n\"\n", + " text: \"Squeeze a line of lotion onto the top of each forearm. Place your forearms behind your back. 
Rub your forearms up and down your back.\\n\\n\"\n", " }\n", " }\n", " finish_reason: STOP\n", - " safety_ratings {\n", - " category: HARM_CATEGORY_HATE_SPEECH\n", - " probability: NEGLIGIBLE\n", - " probability_score: 0.083984375\n", - " severity: HARM_SEVERITY_NEGLIGIBLE\n", - " severity_score: 0.10498046875\n", - " }\n", - " safety_ratings {\n", - " category: HARM_CATEGORY_DANGEROUS_CONTENT\n", - " probability: NEGLIGIBLE\n", - " probability_score: 0.41015625\n", - " severity: HARM_SEVERITY_NEGLIGIBLE\n", - " severity_score: 0.1904296875\n", - " }\n", - " safety_ratings {\n", - " category: HARM_CATEGORY_HARASSMENT\n", - " probability: NEGLIGIBLE\n", - " probability_score: 0.24609375\n", - " severity: HARM_SEVERITY_NEGLIGIBLE\n", - " severity_score: 0.130859375\n", - " }\n", - " safety_ratings {\n", - " category: HARM_CATEGORY_SEXUALLY_EXPLICIT\n", - " probability: NEGLIGIBLE\n", - " probability_score: 0.3671875\n", - " severity: HARM_SEVERITY_LOW\n", - " severity_score: 0.3203125\n", - " }\n", - " avg_logprobs: -0.3171907354284216\n", + " avg_logprobs: -0.39081838726997375\n", "}\n", "usage_metadata {\n", - " prompt_token_count: 263\n", - " candidates_token_count: 27\n", - " total_token_count: 290\n", + " prompt_token_count: 261\n", + " candidates_token_count: 32\n", + " total_token_count: 293\n", "}\n", + "\n", "```" ] }, { "cell_type": "markdown", "metadata": { - "id": "aL4Ie5wFwDTa" + "id": "d54ce2b88af3" }, "source": [ "- We can clearly see the difference between summary generated pre and post tuning, as tuned summary is more inline with the ground truth format (**Note**: Pre and Post outputs, might vary based on the set parameters.)\n", "\n", " - *Pre*: `This article describes a method for applying lotion to your own back using your forearms. The technique involves squeezing lotion in a line along your forearms, bending your elbows, and rubbing your arms against your back in a windshield wiper motion. 
This method may not be suitable for individuals with shoulder pain or limited flexibility.`\n", - " - *Post*: `Squeeze lotion onto your forearms. Place your forearms against your back. Rub your forearms up and down your back.`\n", + " - *Post*: `Squeeze a line of lotion onto the top of each forearm. Place your forearms behind your back. Rub your forearms up and down your back`\n", " - *Ground Truth*:` Squeeze a line of lotion onto the tops of both forearms and the backs of your hands. Place your arms behind your back. Move your arms in a windshield wiper motion.`" ] }, @@ -1510,6 +1489,17 @@ "## Step9: Evaluation post model tuning" ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "mwlCcKPZ62Of" + }, + "source": [ + "
\n", + "⚠️ It will take ~5 mins for the evaluation on the provided batch. ⚠️\n", + "
" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1518,7 +1508,6 @@ }, "outputs": [], "source": [ - "%%time\n", "# run evaluation\n", "evaluation_df_post_tuning = run_evaluation(tuned_genai_model, corpus_batch)" ] @@ -1704,10 +1693,6 @@ ], "metadata": { "colab": { - "collapsed_sections": [ - "8DzlWWKpbGcu", - "NuN-m1Ikbn15" - ], "name": "supervised_finetuning_using_gemini.ipynb", "toc_visible": true }, From 354f2cfe01ff9397596488f23fec7c03df891568 Mon Sep 17 00:00:00 2001 From: Deepak moonat Date: Wed, 25 Sep 2024 09:34:16 +0530 Subject: [PATCH 12/76] update: markdown (#1171) # Description Supervised finetuning using gemini-1.5-pro-002 - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .../supervised_finetuning_using_gemini.ipynb | 53 +++++++------------ 1 file changed, 19 insertions(+), 34 deletions(-) diff --git a/gemini/tuning/supervised_finetuning_using_gemini.ipynb b/gemini/tuning/supervised_finetuning_using_gemini.ipynb index beecd15313..7297400a9e 100644 --- a/gemini/tuning/supervised_finetuning_using_gemini.ipynb +++ b/gemini/tuning/supervised_finetuning_using_gemini.ipynb @@ -86,18 +86,6 @@ " - Metric: Choose appropriate evaluation metrics that accurately reflect the success of the fine-tuned model for your specific task\n", " - Evaluation Set: Use a separate set of data to evaluate the model's performance\n", "\n", - "### Recommended configurations\n", - "The following table shows the recommended configurations for tuning a foundation model by task:\n", - "\n", - "| Task | No. of examples in dataset | Number of epochs |\n", - "| -------------- | -------------------------- | ----------- |\n", - "| Classification | 500+ | 2-4 |\n", - "| Summarization | 1000+ | 2-4 |\n", - "| Extractive QA | 500+ | 2-4 |\n", - "| Chat | 1000+ | 2-4 |\n", - "\n", - "\n", - "
\n", "\n", "Refer to public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning) for more details.\n", "\n", @@ -376,7 +364,7 @@ }, "outputs": [], "source": [ - "PROJECT_ID = \"\" # @param\n", + "PROJECT_ID = \"[YOUR_PROJECT_ID]\" # @param\n", "LOCATION = \"us-central1\" # @param\n", "\n", "vertexai.init(project=PROJECT_ID, location=LOCATION)" @@ -456,7 +444,7 @@ "outputs": [], "source": [ "# Provide a bucket name\n", - "BUCKET_NAME = \"\" # @param {type:\"string\"}\n", + "BUCKET_NAME = \"[YOUR_BUCKET_NAME]\" # @param {type:\"string\"}\n", "BUCKET_URI = f\"gs://{BUCKET_NAME}\"" ] }, @@ -884,12 +872,12 @@ "id": "t6oLtUEWMHVu" }, "source": [ - "| Task | Metric\n", - "|:---------|:--------:|\n", - "| Classification | Micro-F1, Macro-F1, Per class F1 |\n", - "| Summarization | ROUGE-L |\n", - "| Question Answering | Exact Match |\n", - "| Text Generation | BLEU, ROUGE-L |\n", + "| Task | Metric(s) |\n", + "|-----------------|---------------------------------|\n", + "| Classification | Micro-F1, Macro-F1, Per class F1 |\n", + "| Summarization | ROUGE-L |\n", + "| Question Answering | Exact Match |\n", + "| Text Generation | BLEU, ROUGE-L |\n", "\n", "\n", "
\n", @@ -1123,11 +1111,21 @@ "\n", " *Optional parameters*\n", " - `validation_dataset`: If provided, this data is used to evaluate the model during tuning.\n", + " - `tuned_model_display_name`: Display name for the tuned model.\n", " - `epochs`: The number of training epochs to run.\n", " - `learning_rate_multiplier`: A value to scale the learning rate during training.\n", " - `adapter_size` : Gemini 1.5 Pro supports Adapter length [1, 4], default value is 4." ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "4e81137766c6" + }, + "source": [ + "**Note: The default hyperparameter settings are optimized for optimal performance based on rigorous testing and are recommended for initial use. Users may customize these parameters to address specific performance requirements.**" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1136,7 +1134,7 @@ }, "outputs": [], "source": [ - "tuned_model_display_name = \"\" # @param {type:\"string\"}\n", + "tuned_model_display_name = \"[DISPLAY NAME FOR TUNED MODEL]\" # @param {type:\"string\"}\n", "\n", "# Tune a model using `train` method.\n", "sft_tuning_job = sft.train(\n", @@ -1671,19 +1669,6 @@ "\n", "print(\"***\" * 10)\n", "\n", - "# Delete Model.\n", - "delete_model = True\n", - "if delete_model:\n", - " # Remove version from model name.\n", - " tuned_model_name = tuned_model_name.split(\"@\")[0]\n", - " for model in aiplatform.Model.list():\n", - " if model.resource_name == tuned_model_name:\n", - " print(model.resource_name)\n", - " model.delete()\n", - " break\n", - "\n", - "print(\"***\" * 10)\n", - "\n", "# Delete Cloud Storage Bucket.\n", "delete_bucket = True\n", "if delete_bucket:\n", From c347390697e853253e62d210e4d036ba3e6fa544 Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Wed, 25 Sep 2024 12:41:29 -0400 Subject: [PATCH 13/76] fix: Fix the PDF file processing and audio transcription (#1172) # Description Fix the PDF file processing and audio transcription: - Remove timecode output for now as 
it's not totally accurate - Re-define PDF and image fileData in PDF analysis --- .../intro_gemini_1_5_flash.ipynb | 7 +- .../intro_gemini_1_5_pro.ipynb | 143 +++++------------- 2 files changed, 41 insertions(+), 109 deletions(-) diff --git a/gemini/getting-started/intro_gemini_1_5_flash.ipynb b/gemini/getting-started/intro_gemini_1_5_flash.ipynb index 34f6135b34..2cc72107ee 100644 --- a/gemini/getting-started/intro_gemini_1_5_flash.ipynb +++ b/gemini/getting-started/intro_gemini_1_5_flash.ipynb @@ -226,11 +226,7 @@ }, "outputs": [], "source": [ - "from IPython.core.interactiveshell import InteractiveShell\n", "import IPython.display\n", - "\n", - "InteractiveShell.ast_node_interactivity = \"all\"\n", - "\n", "from vertexai.generative_models import (\n", " GenerationConfig,\n", " GenerativeModel,\n", @@ -408,7 +404,7 @@ "outputs": [], "source": [ "prompt = \"\"\"\n", - " Can you transcribe this interview, in the format of timecode, speaker, caption.\n", + " Can you transcribe this interview, in the format of speaker, caption.\n", " Use speaker A, speaker B, etc. 
to identify the speakers.\n", "\"\"\"\n", "\n", @@ -552,6 +548,7 @@ "Context:\n", "\"\"\"\n", "\n", + "pdf_file = Part.from_uri(pdf_file_uri, mime_type=\"application/pdf\")\n", "image_file = Part.from_uri(image_file_uri, mime_type=\"image/png\")\n", "\n", "contents = [\n", diff --git a/gemini/getting-started/intro_gemini_1_5_pro.ipynb b/gemini/getting-started/intro_gemini_1_5_pro.ipynb index 59cda2f42e..1b2d1c36a3 100644 --- a/gemini/getting-started/intro_gemini_1_5_pro.ipynb +++ b/gemini/getting-started/intro_gemini_1_5_pro.ipynb @@ -127,7 +127,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": { "id": "XRvKdaPDTznN" }, @@ -166,7 +166,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "id": "NyKGtVQjgx13" }, @@ -195,7 +195,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "id": "Nqwi-5ufWp_B" }, @@ -220,17 +220,13 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "id": "lslYAvw37JGQ" }, "outputs": [], "source": [ - "from IPython.core.interactiveshell import InteractiveShell\n", "import IPython.display\n", - "\n", - "InteractiveShell.ast_node_interactivity = \"all\"\n", - "\n", "from vertexai.generative_models import (\n", " GenerationConfig,\n", " GenerativeModel,\n", @@ -253,7 +249,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "id": "U7ExWmuLBdIA" }, @@ -390,7 +386,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 26, "metadata": { "id": "10hgCOIA4E5_" }, @@ -409,7 +405,7 @@ "" ] }, - "execution_count": 10, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -433,7 +429,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 28, "metadata": { "id": "OPQ1fBk44E6L" }, @@ -442,25 +438,18 @@ "name": "stdout", "output_type": "stream", "text": [ - "## Pixel Feature Drops: 
March 2023\n", - "\n", - "This podcast discusses the Pixel feature drops, specifically the March 2023 drop, with product managers Aisha Sharif and DeCarlos Love. \n", - "\n", - "### Chapters:\n", - " \n", - "* **00:00 - 00:13**: Introduction. \n", - "* **00:14 - 00:22**: Podcast introduction. \n", - "* **00:22 - 01:07**: Transformative Pixel features. \n", - "* **01:07 - 02:27**: Importance of feature drops. \n", - "* **02:28 - 03:30**: Highlights from January feature drop.\n", - "* **03:31 - 04:52**: March feature drop for Pixel Watch.\n", - "* **04:53 - 06:08**: March feature drop for Pixel phones. \n", - "* **06:09 - 07:23**: Deciding what's in a feature drop. \n", - "* **07:24 - 07:57**: User feedback and feature drops. \n", - "* **07:58 - 08:11**: Release date for March feature drop. \n", - "* **08:12 - 08:58**: Favorite feature drops. \n", - "* **08:59 - 10:05**: More favorite feature drops. \n", - "* **10:06 - 10:28**: Outro and thanks. \n", + "## Made By Google Podcast: March Feature Drop\n", + "\n", + "**Chapter 1: Favorite Pixel Features**\n", + "**Chapter 2: The Importance of Feature Drops**\n", + "**Chapter 3: Highlights from the January Feature Drop**\n", + "**Chapter 4: What’s New for Pixel Watch in March**\n", + "**Chapter 5: What’s New for Pixel Phones in March**\n", + "**Chapter 6: Updates for the Rest of the Pixel Portfolio**\n", + "**Chapter 7: What Drives Feature Choice?**\n", + "**Chapter 8: The Value of User Feedback**\n", + "**Chapter 9: Feature Drop Release Date**\n", + "**Chapter 10: Memorable Feature Drops** \n", "\n" ] } @@ -497,7 +486,7 @@ "outputs": [], "source": [ "prompt = \"\"\"\n", - " Can you transcribe this interview, in the format of timecode, speaker, caption.\n", + " Can you transcribe this interview, in the format of speaker, caption.\n", " Use speaker A, speaker B, etc. 
to identify the speakers.\n", "\"\"\"\n", "\n", @@ -606,7 +595,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 30, "metadata": { "id": "JgKDIZUstYwV" }, @@ -615,18 +604,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "This is a technical report introducing Gemini 1.5 Pro, Google's latest multi-modal model. The model is built upon the mixture-of-experts (MoE) architecture and exhibits impressive performance on reasoning, multi-modality, and long context understanding. Gemini 1.5 Pro distinguishes itself by expanding the context window size to several million tokens, a significant leap beyond the 200k tokens offered by its predecessor, Claude 2.1. This expanded capacity allows for processing nearly five days of audio, entire books, or extensive code repositories. \n", + "The document presents Gemini 1.5 Pro, a new multimodal model from Google DeepMind, which excels in understanding and processing extremely long contexts, up to millions of tokens, across various modalities including text, video and audio. \n", "\n", - "The report highlights the model's abilities through: \n", - "* **Qualitative examples:** Showcasing impressive feats such as pinpointing specific code within the complete JAX codebase, learning to translate a new language from a single grammar book and dictionary, and identifying a scene from Les Misérables based on a hand-drawn sketch. \n", - "* **Quantitative evaluations:** \n", - " * **Diagnostic:** demonstrating near-perfect recall in \"needle-in-a-haystack\" tasks across text, video, and audio, even maintaining high recall with context lengths extending to 10 million tokens. \n", - " * **Realistic:** excelling in long-document QA using Les Misérables as context, outperforming competitors on long-video QA tasks, and showing significant progress in long-context automatic speech recognition. 
\n", - " * **Core Capabilities:** Surpassing the performance of its predecessor (Gemini 1.0) and rivaling or exceeding the performance of a state-of-the-art model, Gemini 1.0 Ultra, on core benchmarks related to coding, math, science, reasoning, and instruction following. \n", + "Key findings:\n", "\n", - "The report also delves into the responsible development and deployment of the model, emphasizing their approach to impact assessment, model mitigations, and ongoing safety evaluations. \n", + "* **Long-context capabilities:** Gemini 1.5 Pro significantly extends the context length frontier to multiple millions of tokens, outperforming other large language models like Claude 2.1 and GPT-4 Turbo. This allows for processing long documents, hours of video, and days of audio recordings. \n", + "* **Maintaining Core Capabilities:** This leap in long-context performance doesn't compromise the model's core multi-modal capabilities, showing improvements in math, science, reasoning, coding, image and video understanding. \n", + "* **In-Context Language Learning:** Gemini 1.5 Pro can learn to translate a new language from a single set of linguistic documentation provided in its input.\n", + "* **Benchmarks and Evaluations:** The document reports results on both synthetic and real-world long-context tasks, demonstrating the model's ability to recall and reason over long sequences in all three modalities. \n", + "* **Responsible Deployment:** The document outlines the model's development and deployment process, highlighting the focus on mitigating safety risks and ensuring responsible use. \n", "\n", - "In conclusion, Gemini 1.5 Pro represents a significant advancement in AI, showcasing unprecedented capabilities in long-context understanding across multiple modalities. The report emphasizes the need for novel evaluation methods to better assess the potential of such models and suggests promising avenues for future research. 
\n", + "The document also presents several challenges and opportunities for future research in evaluating models with very long contexts and developing benchmarks that can effectively assess their capabilities. They advocate for the development of more complex tasks that require reasoning over multiple pieces of information scattered across a long context. \n", "\n" ] } @@ -648,7 +636,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 37, "metadata": { "id": "52ltdcv5EsaM" }, @@ -660,7 +648,7 @@ "" ] }, - "execution_count": 31, + "execution_count": 37, "metadata": { "image/png": { "width": 450 @@ -679,7 +667,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 40, "metadata": { "id": "EEmrMpRMHyel" }, @@ -688,65 +676,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "## Analysis of Request:\n", - "\n", - "The task requires the model to:\n", - "\n", - "1. **Process Multimodal Information:** Analyze both an image and a PDF document provided as context.\n", - "2. **Understand Long Context:** The PDF document could potentially be lengthy, requiring the model to handle long sequences of text.\n", - "3. **Perform Information Retrieval:** Locate specific information within the image and document relevant to the questions.\n", - "4. **Reason and Answer Questions:** Provide concise answers based on the extracted information.\n", - "5. 
**Cite Sources:** Include page numbers from the PDF document to support the answers.\n", - "\n", - "## Potential Challenges: \n", - "\n", - "* **Image Understanding:** Accurately interpreting the content of the image, especially if it's complex or contains charts/graphs.\n", - "* **Long Document Comprehension:** Efficiently processing and retaining information from a potentially long PDF document.\n", - "* **Information Retrieval:** Identifying the specific sections within the document that contain relevant information for answering the questions.\n", - "* **Citation Generation:** Extracting and correctly formatting page numbers for citations. \n", - "\n", - "## Response Generation:\n", - "\n", - "Unfortunately, I cannot directly access external websites or specific files, including the image and PDF mentioned in the request. However, I can guide you on how to approach this task with a model like Gemini 1.5:\n", - "\n", - "**Step 1: Provide Context:** \n", - "\n", - "* Input the entire PDF document and the image as context to the model. You may need to convert the PDF to plain text or use a tool to extract text and images from the PDF.\n", + "- **What is in the given image?** Cumulative Average Reward \n", "\n", - "**Step 2: Formulate Questions:** \n", - "\n", - "* Ask the questions in a clear and concise manner. \n", - "\n", - "**Step 3: Extract Answers:** \n", - "\n", - "* Analyze the model's response to identify the answers to the questions. \n", - "* Look for phrases like \"The image shows...\" or \"According to page X of the document...\" to locate the answers and their corresponding sources.\n", - "\n", - "**Example Response Format:**\n", - "\n", - "**Question 1: What is in the given image?**\n", - "\n", - "**Answer:** The image shows a cumulative average plot with different colored lines representing various models' performance. 
(source: page X) \n", - "\n", - "**Question 2: Is there a similar graph in the given document?**\n", - "\n", - "**Answer:** Yes, a similar graph depicting cumulative average negative log-likelihood can be found on page Y of the document. \n", - "\n", - "\n", - "## Tools and Techniques:\n", - "\n", - "* **Optical Character Recognition (OCR):** If the image contains text, use OCR to extract and include it in the context.\n", - "* **PDF Parsing Libraries:** Utilize libraries like PyPDF2 or PDFMiner to extract text and images from the PDF document.\n", - "* **Regular Expressions:** Employ regular expressions to locate specific patterns in the model's response, such as page numbers for citations.\n", - "\n", - "## Model Considerations:\n", - "\n", - "* **Gemini 1.5 Pro:** This model is well-suited for this task due to its capabilities in handling long context, multimodal information processing, and question answering. \n", - "* **Alternative Models:** Other large language models like Bard or PaLM 2 could also be explored, although their performance may vary.\n", - "\n", - "## Conclusion:\n", - "\n", - "By following these steps and using the appropriate tools and techniques, you can effectively utilize a large language model to analyze multimodal information, answer questions, and cite sources from complex documents and images.\n", + "- **Is there a similar graph in the given document?** Yes (pg. 
8) \n", "\n" ] } @@ -768,9 +700,12 @@ "Context:\n", "\"\"\"\n", "\n", + "pdf_file = Part.from_uri(pdf_file_uri, mime_type=\"application/pdf\")\n", + "image_file = Part.from_uri(image_file_uri, mime_type=\"image/png\")\n", + "\n", "contents = [\n", " pdf_file,\n", - " image_file_uri,\n", + " image_file,\n", " prompt,\n", "]\n", "\n", From 7f7847c3b1dfd5c98431f84ad97786546dcacd46 Mon Sep 17 00:00:00 2001 From: Rupjit Chakraborty Date: Wed, 25 Sep 2024 23:46:08 +0530 Subject: [PATCH 14/76] feat: Updated gemini/prompts/examples/chain_of_thought_react.ipynb notebook (#1146) # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Rupjit Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Co-authored-by: Eric Dong --- .../examples/chain_of_thought_react.ipynb | 136 ++++++++---------- 1 file changed, 63 insertions(+), 73 deletions(-) diff --git a/gemini/prompts/examples/chain_of_thought_react.ipynb b/gemini/prompts/examples/chain_of_thought_react.ipynb index b154d9005c..f00ab111cb 100644 --- a/gemini/prompts/examples/chain_of_thought_react.ipynb +++ b/gemini/prompts/examples/chain_of_thought_react.ipynb @@ -117,15 +117,15 @@ }, "outputs": [], "source": [ - "!pip install -q --user --upgrade langchain \\\n", - " google-cloud-aiplatform \\\n", + "!pip install -q --user --upgrade langchain==0.3.0 \\\n", + " google-cloud-aiplatform==1.67.1 \\\n", " prettyprinter==0.18.0 \\\n", " wikipedia==1.4.0 \\\n", - " google-cloud-bigquery==3.11.4 \\\n", - " langchain-core \\\n", - " langchain-google-vertexai \\\n", - " langchain-experimental \\\n", - " \"bigframes<1.0.0\"" + " google-cloud-bigquery==3.25.0 \\\n", + " langchain-core==0.3.2 \\\n", + " langchain-google-vertexai==2.0.1 \\\n", + " langchain-experimental==0.3.0 \\\n", + " bigframes==1.18.0" ] }, { @@ -216,9 +216,9 @@ }, "outputs": [], "source": [ - "PROJECT_ID = \"your-project-id\" # @param {type:\"string\"}\n", - "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", - "MODEL_NAME = \"gemini-1.0-pro\" # @param {type:\"string\"}\n", + "PROJECT_ID = \"\" # @param {type:\"string\"}\n", + "LOCATION = \"\" # @param {type:\"string\"}\n", + "MODEL_NAME = \"gemini-1.5-flash\" # @param {type:\"string\"}\n", "\n", "import vertexai\n", "\n", @@ -339,14 +339,15 @@ }, "outputs": [], "source": [ - "question = \"\"\"Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", - "Each can has 3 tennis balls. 
How many tennis balls does he have now?\n", - "A: The answer is 11.\n", - "Q: The cafeteria had 23 apples.\n", - "If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", - "A:\"\"\"\n", - "\n", - "llm.invoke(question)" + "question = \"\"\"\n", + " Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", + " Each can has 3 tennis balls. How many tennis balls does he have now?\n", + " A: The answer is 11.\n", + " Q: The cafeteria had 33 apples.\n", + " If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", + " A:\n", + "\"\"\"\n", + "print(llm.invoke(question))" ] }, { @@ -368,15 +369,17 @@ }, "outputs": [], "source": [ - "question = \"\"\"Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", - "Each can has 3 tennis balls. How many tennis balls does he have now?\n", - "A: Roger started with 5 balls. 2 cans of 3 tennis balls\n", - "each is 6 tennis balls. 5 + 6 = 11. The answer is 11.\n", - "Q: The cafeteria had 23 apples.\n", - "If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", - "A:\"\"\"\n", + "question = \"\"\"\n", + " Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", + " Each can has 3 tennis balls. How many tennis balls does he have now?\n", + " A: Roger started with 5 balls. 2 cans of 3 tennis balls\n", + " each is 6 tennis balls. 5 + 6 = 11. The answer is 11.\n", + " Q: The cafeteria had 23 apples.\n", + " If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", + " A:\n", + "\"\"\"\n", "\n", - "llm.invoke(question)" + "print(llm.invoke(question))" ] }, { @@ -423,14 +426,15 @@ }, "outputs": [], "source": [ - "question = \"\"\"Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", - "Each can has 3 tennis balls. 
How many tennis balls does he have now?\n", - "A: The answer is 11.\n", - "Q: The cafeteria had 23 apples.\n", - "If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", - "A:\"\"\"\n", - "\n", - "llm.invoke(question)" + "question = \"\"\"\n", + " Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", + " Each can has 3 tennis balls. How many tennis balls does he have now?\n", + " A: The answer is 11.\n", + " Q: The cafeteria had 23 apples.\n", + " If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", + " A:\n", + "\"\"\"\n", + "print(llm.invoke(question))" ] }, { @@ -441,15 +445,17 @@ }, "outputs": [], "source": [ - "question = \"\"\"Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", + "question = \"\"\"\n", + "Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls.\n", "Each can has 3 tennis balls. How many tennis balls does he have now?\n", "A: The answer is 11.\n", "\n", "Q: The cafeteria had 23 apples.\n", "If they used 20 to make lunch and bought 6 more, how many apples do they have?\n", - "A: Let's think step by step.\"\"\"\n", + "A: Let's think step by step.\n", + "\"\"\"\n", "\n", - "llm.invoke(question)" + "print(llm.invoke(question))" ] }, { @@ -825,6 +831,8 @@ }, "outputs": [], "source": [ + "from langchain.agents.agent_types import AgentType\n", + "from langchain.agents.initialize import initialize_agent\n", "from langchain.llms import VertexAI\n", "from langchain.tools import StructuredTool, WikipediaQueryRun\n", "from langchain.utilities import WikipediaAPIWrapper\n", @@ -908,7 +916,7 @@ }, "outputs": [], "source": [ - "llm = VertexAI(temperature=0)\n", + "from langchain.agents import load_tools\n", "\n", "_ = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())\n", "\n", @@ -923,7 +931,7 @@ "\n", "# Let's search some information generally available on wikipedia.\n", "agent.invoke(\n", - " \"Give me a summary of the life of one famous person who was 
born October 2nd?\"\n", + " \"Give me a short summary of a famous monument along with the name of the wikipedia page referenced\"\n", ")" ] }, @@ -974,7 +982,11 @@ }, "outputs": [], "source": [ - "QUERY = \"SELECT * FROM bigquery-public-data.hacker_news.full LIMIT 10\"\n", + "QUERY = \"\"\"\n", + "SELECT * FROM `bigquery-public-data.hacker_news.full` \n", + "WHERE CONCAT(title, url, text) IS NOT NULL\n", + "LIMIT 10\n", + "\"\"\"\n", "df = bq.query(QUERY).to_dataframe()\n", "df" ] @@ -1016,8 +1028,7 @@ " question = \"\"\"Create a 1 sentence friendly response to the following comment: {comment}\"\"\".format(\n", " comment=comment\n", " )\n", - " llm1 = VertexAI(temperature=0.2, max_output_tokens=150)\n", - " response = llm1.predict(question)\n", + " response = llm.predict(question)\n", "\n", " return response\n", "\n", @@ -1029,8 +1040,7 @@ " question = \"\"\"What is the sentiment of the comment (Negative, Positive, Neutral): {comment}\"\"\".format(\n", " comment=comment\n", " )\n", - " llm1 = VertexAI(temperature=0.2, max_output_tokens=150)\n", - " response = llm1.predict(question)\n", + " response = llm.predict(question)\n", "\n", " return response\n", "\n", @@ -1042,8 +1052,7 @@ " question = \"\"\"Put the comment into one of these categories (Technology, Politics, Products, News): {comment}\"\"\".format(\n", " comment=comment\n", " )\n", - " llm1 = VertexAI(temperature=0.2, max_output_tokens=150)\n", - " response = llm1.predict(question)\n", + " response = llm.predict(question)\n", "\n", " return response" ] @@ -1056,7 +1065,10 @@ }, "outputs": [], "source": [ + "from langchain.agents import Tool\n", + "\n", "# Create a pool of tools which will be utilized by the LLM to generate response.\n", + "\n", "tools = [\n", " Tool(\n", " name=\"GetCommentsById\",\n", @@ -1244,6 +1256,8 @@ }, "outputs": [], "source": [ + "from langchain.agents.agent import AgentExecutor\n", + "\n", "agent = ReActTestAgent.from_llm_and_tools(llm, tools, verbose=True)\n", "\n", 
"agent_executor = AgentExecutor.from_agent_and_tools(\n", @@ -1260,19 +1274,7 @@ }, "outputs": [], "source": [ - "input = \"Category for the comment 8885404\"\n", - "agent_executor.invoke(input)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "91K_tImIzCwp" - }, - "outputs": [], - "source": [ - "input = \"Sentiment for comment 8885404\"\n", + "input = \"Category for the comment 919518\"\n", "agent_executor.invoke(input)" ] }, @@ -1284,7 +1286,7 @@ }, "outputs": [], "source": [ - "input = \"Response for all comments by jpmoral.\"\n", + "input = \"Response for all comments by spicywebdesign.\"\n", "agent_executor.invoke(input)" ] }, @@ -1296,19 +1298,7 @@ }, "outputs": [], "source": [ - "input = \"Respond to the comment 9871807.\"\n", - "agent_executor.invoke(input)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LYrh5IR0Qp09" - }, - "outputs": [], - "source": [ - "input = \"Respond to all the comments by jpmoral.\"\n", + "input = \"Get comments by coleifer\"\n", "agent_executor.invoke(input)" ] } From 0cb3ac780fbfebebfa9779ba26dd16a9571a620d Mon Sep 17 00:00:00 2001 From: nhootan <103317089+nhootan@users.noreply.github.com> Date: Thu, 26 Sep 2024 10:48:00 -0400 Subject: [PATCH 15/76] feat: add gemini-1.5-pro:002 to the prompt optimizer notebook. 
(#1180) # Description --------- Co-authored-by: hootan Co-authored-by: Owl Bot --- .../vertex_ai_prompt_optimizer_ui.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb index 93d820f28e..a17a30b1c1 100644 --- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "id": "NyKGtVQjgx13" }, @@ -202,9 +202,9 @@ "\n", "The most important settings are:\n", "\n", - "Target Model: Which model you are trying to optimize your prompts to.\n", - "Optimization Mode: The mode in which you are trying to optimize your prompt with.\n", - "Evaluation Metrics: The evaluation metrics in which you are trying to optimize your prompts against.\n", + "* Target Model: Which model you are trying to optimize your prompts to.\n", + "* Optimization Mode: The mode in which you are trying to optimize your prompt with.\n", + "* Evaluation Metrics: The evaluation metrics in which you are trying to optimize your prompts against.\n", "Refer [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#configuration) to learn more about the different configuration settings and how to best utilize them." 
] }, @@ -216,8 +216,8 @@ }, "outputs": [], "source": [ - "SOURCE_MODEL = \"\" # @param [\"\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"]\n", - "TARGET_MODEL = \"gemini-1.5-flash-001\" # @param [\"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\"]\n", + "SOURCE_MODEL = \"\" # @param [\"\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.5-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"]\n", + "TARGET_MODEL = \"gemini-1.5-flash-001\" # @param [\"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.5-pro-002\", \"gemini-1.0-ultra-001\"]\n", "OPTIMIZATION_MODE = \"instruction_and_demo\" # @param [\"instruction\", \"demonstration\", \"instruction_and_demo\"]\n", "EVAL_METRIC = \"question_answering_correctness\" # @param [\"bleu\", \"coherence\", \"exact_match\", \"fluency\", \"groundedness\", \"text_quality\", \"verbosity\", \"rouge_1\", \"rouge_2\", \"rouge_l\", \"rouge_l_sum\", \"safety\", \"question_answering_correctness\", \"question_answering_quality\", \"summarization_quality\", \"tool_name_match\", \"tool_parameter_key_match\", \"tool_parameter_kv_match\", \"tool_call_valid\"] {type:\"string\"}" ] From 814aec5c52af44d03a4f8267afd4720a2b276f47 Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Thu, 26 Sep 2024 19:04:10 +0200 Subject: [PATCH 16/76] fix: utils module and notebook name (#1182) # Description Fixes: Download utils module and change notebook name. Thank you for opening a Pull Request! 
Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- ...b => vertex_ai_prompt_optimizer_sdk.ipynb} | 37 ++++++++++++++++--- 1 file changed, 31 insertions(+), 6 deletions(-) rename gemini/prompts/prompt_optimizer/{get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb => vertex_ai_prompt_optimizer_sdk.ipynb} (97%) diff --git a/gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb similarity index 97% rename from gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb rename to gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb index 98526b6428..35f16a7d7b 100644 --- a/gemini/prompts/prompt_optimizer/get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb @@ -29,26 +29,26 @@ "id": "JAPoU8Sm5E6e" }, "source": [ - "# Get started with Vertex Prompt Optimizer Notebook SDK (Preview)\n", + "# Vertex Prompt Optimizer Notebook SDK (Preview)\n", "\n", "\n", " \n", " \n", " \n", " \n", @@ -56,6 +56,17 @@ " " ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "0ccc35a93b9f" + }, + "source": [ + "| | | |\n", + "|-|-|-|\n", + "|Author | [Ivan Nardini](https://github.com/inardini)" + ] + }, { "cell_type": "markdown", "metadata": { @@ -156,6 
+167,20 @@ "! pip3 install --upgrade --quiet 'asyncio' 'tqdm' 'tenacity' 'etils' 'importlib_resources' 'fsspec' 'gcsfs' 'nbformat>=4.2.0'" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "e55e2195ce2d" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " ! mkdir -p ./utils && wget https://raw.githubusercontent.com/GoogleCloudPlatform/generative-ai/main/gemini/prompts/prompt_optimizer/utils/helpers.py -P ./utils" + ] + }, { "cell_type": "markdown", "metadata": { @@ -1135,7 +1160,7 @@ ], "metadata": { "colab": { - "name": "get_started_with_vertex_ai_prompt_optimizer_sdk.ipynb", + "name": "vertex_ai_prompt_optimizer_sdk.ipynb", "toc_visible": true }, "kernelspec": { From 5a4a10a1fed1b762d6a5a885cbff1c26a6774e2d Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Thu, 26 Sep 2024 15:44:10 -0400 Subject: [PATCH 17/76] fix: Use a stable version of Gemini 1.5 models (#1183) # Description Use a stable version of Gemini 1.5 models to mitigate instability issues. 
--- gemini/getting-started/intro_gemini_1_5_flash.ipynb | 2 +- gemini/getting-started/intro_gemini_1_5_pro.ipynb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gemini/getting-started/intro_gemini_1_5_flash.ipynb b/gemini/getting-started/intro_gemini_1_5_flash.ipynb index 2cc72107ee..588ffa9767 100644 --- a/gemini/getting-started/intro_gemini_1_5_flash.ipynb +++ b/gemini/getting-started/intro_gemini_1_5_flash.ipynb @@ -255,7 +255,7 @@ }, "outputs": [], "source": [ - "MODEL_ID = \"gemini-1.5-flash\" # @param {type:\"string\"}\n", + "MODEL_ID = \"gemini-1.5-flash-002\" # @param {type:\"string\"}\n", "\n", "model = GenerativeModel(MODEL_ID)" ] diff --git a/gemini/getting-started/intro_gemini_1_5_pro.ipynb b/gemini/getting-started/intro_gemini_1_5_pro.ipynb index 1b2d1c36a3..f2d55a0cb0 100644 --- a/gemini/getting-started/intro_gemini_1_5_pro.ipynb +++ b/gemini/getting-started/intro_gemini_1_5_pro.ipynb @@ -255,7 +255,7 @@ }, "outputs": [], "source": [ - "MODEL_ID = \"gemini-1.5-pro\" # @param {type:\"string\"}\n", + "MODEL_ID = \"gemini-1.5-pro-002\" # @param {type:\"string\"}\n", "\n", "model = GenerativeModel(MODEL_ID)" ] From 62bcd2a7f75f82a284f05e64c6e95b8be4aad964 Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Fri, 27 Sep 2024 15:40:59 +0800 Subject: [PATCH 18/76] minor updates (#1184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ ] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [ ] Make your Pull Request title in the specification. 
- [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --- ...ning_token_count_and_cost_estimation.ipynb | 76 ++++++++++++------- 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb index 27c7faf1a0..0d8e98a8b8 100644 --- a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb +++ b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb @@ -102,11 +102,26 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, "metadata": { - "id": "tFy3H3aPgx12" + "id": "tFy3H3aPgx12", + "outputId": "cd2eca8a-1e2f-47b1-f421-2488d5a4d736", + "colab": { + "base_uri": "https://localhost:8080/" + } }, - "outputs": [], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/5.3 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m5.2/5.3 MB\u001b[0m \u001b[31m161.8 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.3/5.3 MB\u001b[0m \u001b[31m80.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h\u001b[33m WARNING: The script tb-gcp-uploader is installed in '/root/.local/bin' which is not on PATH.\n", + " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\u001b[33m\n", + "\u001b[0m" + ] + } + ], "source": [ "%pip install --upgrade --user --quiet google-cloud-aiplatform[tokenization] numpy==1.26.4 tensorflow" ] @@ -126,20 +141,24 @@ }, { "cell_type": "code", - 
"execution_count": null, + "execution_count": 1, "metadata": { - "id": "XRvKdaPDTznN" + "id": "XRvKdaPDTznN", + "outputId": "f285bbb4-4da9-45ed-e79c-96a4e688fda8", + "colab": { + "base_uri": "https://localhost:8080/" + } }, "outputs": [ { + "output_type": "execute_result", "data": { "text/plain": [ "{'status': 'ok', 'restart': True}" ] }, - "execution_count": 2, "metadata": {}, - "output_type": "execute_result" + "execution_count": 1 } ], "source": [ @@ -266,7 +285,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "id": "f0JwfuPSSofK" }, @@ -290,9 +309,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { - "id": "PTvunHqRTHqe" + "id": "PTvunHqRTHqe", + "outputId": "7f67d2b6-40d2-4689-c90e-651d5732a5e4" }, "outputs": [ { @@ -360,7 +380,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "id": "1mzpB9PUVp5z" }, @@ -445,9 +465,10 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": { - "id": "pUCpEmEFM0eX" + "id": "pUCpEmEFM0eX", + "outputId": "20c9e84d-1276-463d-e716-0d0b913500c0" }, "outputs": [ { @@ -676,7 +697,8 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "uOWsUbwVXoTU" + "id": "uOWsUbwVXoTU", + "outputId": "40a94287-dd21-43f0-cee3-cb4a1820927b" }, "outputs": [ { @@ -740,7 +762,8 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "DVIpbaGYRJQc" + "id": "DVIpbaGYRJQc", + "outputId": "be75df17-ec76-4660-bf23-82c0cd051423" }, "outputs": [ { @@ -791,18 +814,17 @@ }, "outputs": [], "source": [ - "BASE_MODEL = \"gemini-1.5-pro-001\" # @param ['gemini-1.5-pro-001']{type:\"string\"}\n", + "BASE_MODEL = \"gemini-1.5-pro-002\" # @param ['gemini-1.5-pro-002']{type:\"string\"}\n", "training_dataset_uri_2 = \"gs://github-repo/generative-ai/gemini/tuning/train_sft_train_samples.jsonl\" # @param {type:\"string\"}\n", - "validation_dataset_uri_2 = 
\"gs://github-repo/generative-ai/gemini/tuning/val_sft_val_samples.jsonl\" # @param {type:\"string\"}\n", - "\n", - "tokenizer = get_tokenizer_for_model(BASE_MODEL)" + "validation_dataset_uri_2 = \"gs://github-repo/generative-ai/gemini/tuning/val_sft_val_samples.jsonl\" # @param {type:\"string\"}" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": { - "id": "p-soJC81YNy2" + "id": "p-soJC81YNy2", + "outputId": "ba2ee710-f3d9-4ec2-a670-679aca12e0d8" }, "outputs": [ { @@ -844,7 +866,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": { "id": "K0YOqIbtXwGI" }, @@ -929,9 +951,10 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": { - "id": "RppMRP9lIkq2" + "id": "RppMRP9lIkq2", + "outputId": "d6cc0e50-830b-4094-89dd-3e400ec6d416" }, "outputs": [ { @@ -958,7 +981,8 @@ "DF4l8DTdWgPY" ], "name": "vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb", - "toc_visible": true + "toc_visible": true, + "provenance": [] }, "kernelspec": { "display_name": "Python 3", @@ -967,4 +991,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 638e68e680d636ebce7ac5b88784007614674384 Mon Sep 17 00:00:00 2001 From: Ariel Jassan Date: Fri, 27 Sep 2024 16:10:21 +0300 Subject: [PATCH 19/76] refactor: move python notebook to gemini/prompts folder (#1179) Moving the python notebook to the folder agreed up on https://github.com/GoogleCloudPlatform/generative-ai/pull/1027#pullrequestreview-2325741634 --------- Co-authored-by: Owl Bot Co-authored-by: Eric Dong --- ...ale_with_gemini_batch_prediction_api.ipynb | 8 ++-- ...ning_token_count_and_cost_estimation.ipynb | 41 ++++++------------- 2 files changed, 17 insertions(+), 32 deletions(-) rename gemini/{evaluation/legacy => prompts}/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb (97%) diff --git 
a/gemini/evaluation/legacy/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb b/gemini/prompts/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb similarity index 97% rename from gemini/evaluation/legacy/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb rename to gemini/prompts/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb index 1ab31d7e01..47b3ebc633 100644 --- a/gemini/evaluation/legacy/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb +++ b/gemini/prompts/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb @@ -33,22 +33,22 @@ "\n", "
\n", - " \n", + " \n", " \"Google
Open in Colab\n", "
\n", "
\n", - " \n", + " \n", " \"Google
Open in Colab Enterprise\n", "
\n", "
\n", - " \n", + " \n", " \"Vertex
Open in Vertex AI Workbench\n", "
\n", "
\n", - " \n", + " \n", " \"GitHub
View on GitHub\n", "
\n", "
\n", " \n", " \n", " \n", " \n", diff --git a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb index 0d8e98a8b8..1af8cb4092 100644 --- a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb +++ b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb @@ -104,16 +104,12 @@ "cell_type": "code", "execution_count": 3, "metadata": { - "id": "tFy3H3aPgx12", - "outputId": "cd2eca8a-1e2f-47b1-f421-2488d5a4d736", - "colab": { - "base_uri": "https://localhost:8080/" - } + "id": "tFy3H3aPgx12" }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/5.3 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m5.2/5.3 MB\u001b[0m \u001b[31m161.8 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.3/5.3 MB\u001b[0m \u001b[31m80.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h\u001b[33m WARNING: The script tb-gcp-uploader is installed in '/root/.local/bin' which is not on PATH.\n", @@ -143,22 +139,18 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "id": "XRvKdaPDTznN", - "outputId": "f285bbb4-4da9-45ed-e79c-96a4e688fda8", - "colab": { - "base_uri": "https://localhost:8080/" - } + "id": "XRvKdaPDTznN" }, "outputs": [ { - "output_type": "execute_result", "data": { "text/plain": [ "{'status': 'ok', 'restart': True}" ] }, + "execution_count": 1, "metadata": {}, - "execution_count": 1 + "output_type": "execute_result" } ], "source": [ @@ -311,8 +303,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "PTvunHqRTHqe", - "outputId": "7f67d2b6-40d2-4689-c90e-651d5732a5e4" + 
"id": "PTvunHqRTHqe" }, "outputs": [ { @@ -467,8 +458,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "pUCpEmEFM0eX", - "outputId": "20c9e84d-1276-463d-e716-0d0b913500c0" + "id": "pUCpEmEFM0eX" }, "outputs": [ { @@ -697,8 +687,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "uOWsUbwVXoTU", - "outputId": "40a94287-dd21-43f0-cee3-cb4a1820927b" + "id": "uOWsUbwVXoTU" }, "outputs": [ { @@ -762,8 +751,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "DVIpbaGYRJQc", - "outputId": "be75df17-ec76-4660-bf23-82c0cd051423" + "id": "DVIpbaGYRJQc" }, "outputs": [ { @@ -823,8 +811,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "p-soJC81YNy2", - "outputId": "ba2ee710-f3d9-4ec2-a670-679aca12e0d8" + "id": "p-soJC81YNy2" }, "outputs": [ { @@ -953,8 +940,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "RppMRP9lIkq2", - "outputId": "d6cc0e50-830b-4094-89dd-3e400ec6d416" + "id": "RppMRP9lIkq2" }, "outputs": [ { @@ -981,8 +967,7 @@ "DF4l8DTdWgPY" ], "name": "vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb", - "toc_visible": true, - "provenance": [] + "toc_visible": true }, "kernelspec": { "display_name": "Python 3", @@ -991,4 +976,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From c98ba59fef5137b8431b668bae279ecfa0b16bb8 Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Fri, 27 Sep 2024 17:09:33 -0400 Subject: [PATCH 20/76] fix: Correct a model version used in the safety notebook (#1185) # Description Correct a model version used in the safety notebook Gemini 1.0 > Gemini 1.5 --------- Co-authored-by: Owl Bot --- gemini/responsible-ai/gemini_safety_ratings.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gemini/responsible-ai/gemini_safety_ratings.ipynb b/gemini/responsible-ai/gemini_safety_ratings.ipynb index 0105f34fd7..122de5d9f8 100644 --- a/gemini/responsible-ai/gemini_safety_ratings.ipynb 
+++ b/gemini/responsible-ai/gemini_safety_ratings.ipynb @@ -270,7 +270,7 @@ "id": "5rpgrqQrPJQ-" }, "source": [ - "### Load the Gemini 1.0 Pro model\n" + "### Load the Gemini 1.5 Pro model\n" ] }, { From a04e98df0f4cd5638e5144718a5d18d8b7a76471 Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Fri, 27 Sep 2024 17:48:17 -0400 Subject: [PATCH 21/76] feat: Add safety settings changes for the new models (#1188) # Description Add safety settings changes for the new Gemini 1.5 Flash 002 and Gemini 1.5 Pro 002 models: - The safety settings are `OFF` by default - The default block thresholds are `BLOCK_NONE` --- .../getting-started/intro_gemini_python.ipynb | 121 +++++++++++++++--- 1 file changed, 102 insertions(+), 19 deletions(-) diff --git a/gemini/getting-started/intro_gemini_python.ipynb b/gemini/getting-started/intro_gemini_python.ipynb index 2007da32de..9080ff3695 100644 --- a/gemini/getting-started/intro_gemini_python.ipynb +++ b/gemini/getting-started/intro_gemini_python.ipynb @@ -111,21 +111,6 @@ " - Generate text from video and text prompt\n" ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "C9nEPojogw-g" - }, - "source": [ - "### Costs\n", - "\n", - "This tutorial uses billable components of Google Cloud:\n", - "\n", - "- Vertex AI\n", - "\n", - "Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.\n" - ] - }, { "cell_type": "markdown", "metadata": { @@ -174,7 +159,6 @@ }, "outputs": [], "source": [ - "# Restart kernel after installs so that your environment can access the new packages\n", "import IPython\n", "\n", "app = IPython.Application.instance()\n", @@ -213,9 +197,7 @@ "source": [ "import sys\n", "\n", - "# Additional authentication is required for Google Colab\n", "if \"google.colab\" in sys.modules:\n", - " # Authenticate user to Google Cloud\n", " from google.colab import auth\n", 
"\n", " auth.authenticate_user()" @@ -268,7 +250,15 @@ }, "outputs": [], "source": [ - "from vertexai.generative_models import GenerationConfig, GenerativeModel, Image, Part" + "from vertexai.generative_models import (\n", + " GenerationConfig,\n", + " GenerativeModel,\n", + " HarmBlockThreshold,\n", + " HarmCategory,\n", + " Image,\n", + " Part,\n", + " SafetySetting,\n", + ")" ] }, { @@ -511,6 +501,99 @@ "print(response.text)" ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "7bff84b3f1c3" + }, + "source": [ + "### Safety filters\n", + "\n", + "The Gemini API provides safety filters that you can adjust across multiple filter categories to restrict or allow certain types of content. You can use these filters to adjust what's appropriate for your use case. See the [Configure safety filters](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters) page for details.\n", + "\n", + "When you make a request to Gemini, the content is analyzed and assigned a safety rating. 
You can inspect the safety ratings of the generated content by printing out the model responses, as in this example:" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": { + "id": "6548f7974b26" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Safety ratings:\n", + "[category: HARM_CATEGORY_HATE_SPEECH\n", + "probability: NEGLIGIBLE\n", + "probability_score: 0.0693359375\n", + "severity: HARM_SEVERITY_NEGLIGIBLE\n", + "severity_score: 0.046630859375\n", + ", category: HARM_CATEGORY_DANGEROUS_CONTENT\n", + "probability: NEGLIGIBLE\n", + "probability_score: 0.09130859375\n", + "severity: HARM_SEVERITY_NEGLIGIBLE\n", + "severity_score: 0.0693359375\n", + ", category: HARM_CATEGORY_HARASSMENT\n", + "probability: NEGLIGIBLE\n", + "probability_score: 0.11767578125\n", + "severity: HARM_SEVERITY_NEGLIGIBLE\n", + "severity_score: 0.0267333984375\n", + ", category: HARM_CATEGORY_SEXUALLY_EXPLICIT\n", + "probability: NEGLIGIBLE\n", + "probability_score: 0.1435546875\n", + "severity: HARM_SEVERITY_NEGLIGIBLE\n", + "severity_score: 0.0289306640625\n", + "]\n" + ] + } + ], + "source": [ + "response = model.generate_content(\"Why is the sky blue?\")\n", + "\n", + "print(f\"Safety ratings:\\n{response.candidates[0].safety_ratings}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3fe5bb6d26c8" + }, + "source": [ + "In Gemini 1.5 Flash 002 and Gemini 1.5 Pro 002, the safety settings are `OFF` by default and the default block thresholds are `BLOCK_NONE`.\n", + "\n", + "You can use `safety_settings` to adjust the safety settings for each request you make to the API. 
This example demonstrates how you set the block threshold to BLOCK_ONLY_HIGH for the dangerous content category:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4c055f9f41a5" + }, + "outputs": [], + "source": [ + "safety_settings = [\n", + " SafetySetting(\n", + " category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,\n", + " threshold=HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", + " ),\n", + "]\n", + "\n", + "prompt = \"\"\"\n", + " Write a list of 2 disrespectful things that I might say to the universe after stubbing my toe in the dark.\n", + "\"\"\"\n", + "\n", + "response = model.generate_content(\n", + " prompt,\n", + " safety_settings=safety_settings,\n", + ")\n", + "\n", + "print(response)" + ] + }, { "cell_type": "markdown", "metadata": { From 185c74d75650cffc067429be93a9965dfee8fc19 Mon Sep 17 00:00:00 2001 From: Averi Kitsch Date: Fri, 27 Sep 2024 16:01:25 -0700 Subject: [PATCH 22/76] fix: update Reasoning Engine tutorials with LangChain packages (#1191) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ ] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [ ] Make your Pull Request title in the specification. - [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --------- Co-authored-by: Owl Bot --- .../tutorial_alloydb_rag_agent.ipynb | 15 +++++++++------ .../tutorial_cloud_sql_pg_rag_agent.ipynb | 17 ++++++++++------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb b/gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb index 94283718df..f174b9eba8 100644 --- a/gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb +++ b/gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb @@ -128,7 +128,7 @@ }, "outputs": [], "source": [ - "!pip install --upgrade --quiet \"google-cloud-aiplatform[reasoningengine,langchain]\" langchain-google-alloydb-pg langchain-google-vertexai" + "%pip install --upgrade --quiet \"langchain-google-alloydb-pg>=0.7.0\" \"google-cloud-aiplatform[reasoningengine,langchain]\" langchain-google-vertexai langchain-community" ] }, { @@ -401,7 +401,9 @@ " password=PASSWORD,\n", ")\n", "\n", - "await engine._aexecute_outside_tx(f\"CREATE DATABASE {DATABASE}\")" + "async with engine._pool.connect() as conn:\n", + " await conn.execute(text(\"COMMIT\"))\n", + " await conn.execute(text(f\"CREATE DATABASE {DATABASE}\"))" ] }, { @@ -570,7 +572,8 @@ " PROJECT_ID, REGION, CLUSTER, INSTANCE, DATABASE, user=\"postgres\", password=PASSWORD\n", ")\n", "\n", - "await engine._aexecute(f'GRANT SELECT ON {TABLE_NAME} TO \"{IAM_USER}\";')" + "async with engine._pool.connect() as conn:\n", + " await conn.execute(text(f'GRANT SELECT ON {TABLE_NAME} TO \"{IAM_USER}\";'))" ] }, { @@ -585,7 +588,7 @@ "\n", "In this example, you'll define a function that will retrieve similar documents from the vector store using semantic search.\n", "\n", - "For improved security measures, the tool wil use IAM-based authentication to authenticate to the databases instead of using the built-in user/password authentication." 
+ "For improved security measures, the tool will use IAM-based authentication to authenticate to the databases instead of using the built-in user/password authentication." ] }, { @@ -659,8 +662,8 @@ " },\n", " ),\n", " requirements=[\n", - " \"google-cloud-aiplatform[reasoningengine,langchain]==1.57.0\",\n", - " \"langchain-google-alloydb-pg==0.4.1\",\n", + " \"google-cloud-aiplatform[reasoningengine,langchain]==1.68.0\",\n", + " \"langchain-google-alloydb-pg==0.7.0\",\n", " \"langchain-google-vertexai==1.0.4\",\n", " ],\n", " display_name=\"PrebuiltAgent\",\n", diff --git a/gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb b/gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb index 8b2d165a84..46173dbe73 100644 --- a/gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb +++ b/gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb @@ -128,7 +128,7 @@ }, "outputs": [], "source": [ - "!pip install --upgrade --quiet \"google-cloud-aiplatform[reasoningengine,langchain]\" langchain-google-cloud-sql-pg langchain-google-vertexai" + "!pip install --upgrade --quiet \"langchain-google-cloud-sql-pg>=0.10.0\" \"google-cloud-aiplatform[reasoningengine,langchain]\" langchain-google-vertexai langchain-community" ] }, { @@ -345,7 +345,9 @@ " password=PASSWORD,\n", ")\n", "\n", - "await engine._aexecute_outside_tx(f\"CREATE DATABASE {DATABASE}\")" + "async with engine._pool.connect() as conn:\n", + " await conn.execute(text(\"COMMIT\"))\n", + " await conn.execute(text(f\"CREATE DATABASE {DATABASE}\"))" ] }, { @@ -513,7 +515,8 @@ " PROJECT_ID, REGION, INSTANCE, DATABASE, user=\"postgres\", password=PASSWORD\n", ")\n", "\n", - "await engine._aexecute(f'GRANT SELECT ON {TABLE_NAME} TO \"{IAM_USER}\";')" + "async with engine._pool.connect() as conn:\n", + " await conn.execute(text(f'GRANT SELECT ON {TABLE_NAME} TO \"{IAM_USER}\";'))" ] }, { @@ -528,7 +531,7 @@ "\n", "In this example, you'll define a function that will retrieve similar documents from 
the vector store using semantic search.\n", "\n", - "For improved security measures, the tool wil use IAM-based authentication to authenticate to the databases instead of using the built-in user/password authentication." + "For improved security measures, the tool will use IAM-based authentication to authenticate to the databases instead of using the built-in user/password authentication." ] }, { @@ -602,9 +605,9 @@ " },\n", " ),\n", " requirements=[\n", - " \"google-cloud-aiplatform[reasoningengine,langchain]==1.57.0\",\n", - " \"langchain-google-cloud-sql-pg==0.6.1\",\n", - " \"langchain-google-vertexai==1.0.4\",\n", + " \"google-cloud-aiplatform[reasoningengine,langchain]==1.68.0\",\n", + " \"langchain-google-cloud-sql-pg==0.10.0\",\n", + " \"langchain-google-vertexai==1.0.10\",\n", " ],\n", " display_name=\"PrebuiltAgent\",\n", ")" From eacc4fd220f5b682e2efe370a901094a29880595 Mon Sep 17 00:00:00 2001 From: Andrew Grande Date: Fri, 27 Sep 2024 16:03:54 -0700 Subject: [PATCH 23/76] docs: Example Selectors page has moved (#1186) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ ] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [ ] Make your Pull Request title in the specification. - [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 Co-authored-by: Kristopher Overholt --- gemini/orchestration/intro_langchain_gemini.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gemini/orchestration/intro_langchain_gemini.ipynb b/gemini/orchestration/intro_langchain_gemini.ipynb index fadcc7849b..dff319a923 100644 --- a/gemini/orchestration/intro_langchain_gemini.ipynb +++ b/gemini/orchestration/intro_langchain_gemini.ipynb @@ -714,7 +714,7 @@ "\n", "[Example selectors](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/) are an easy way to select from a series of examples to dynamically place in-context information into your prompt. Often used when the task is nuanced or has a large list of examples.\n", "\n", - "Check out different types of example selectors [here](https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/)\n" + "Check out different types of example selectors [here](https://python.langchain.com/docs/how_to/example_selectors/)\n" ] }, { From 438aaad6838fec390065d0252a3d32a75dba5d91 Mon Sep 17 00:00:00 2001 From: Kristopher Overholt Date: Mon, 30 Sep 2024 10:08:17 -0500 Subject: [PATCH 24/76] docs: Add sample media to Gemini multimodal function calling notebook (#1168) # Description This is a followup PR to #1162 that adds sample images and content to each section that helps add context to the analysis performed (e.g., screenshot, GIF of video, transcript). - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. - [X] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. - [X] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [X] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .../multimodal_function_calling.ipynb | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/gemini/function-calling/multimodal_function_calling.ipynb b/gemini/function-calling/multimodal_function_calling.ipynb index 0c7759da95..0a287fe055 100644 --- a/gemini/function-calling/multimodal_function_calling.ipynb +++ b/gemini/function-calling/multimodal_function_calling.ipynb @@ -78,8 +78,6 @@ "\n", "This notebook demonstrates a powerful [Function Calling](https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/function-calling) capability of the Gemini model: support for multimodal inputs. With multimodal function calling, you can go beyond traditional text inputs, enabling Gemini to understand your intent and predict function calls and function parameters based on various inputs like images, audio, video, and PDFs. Function calling can also be referred to as *function calling with controlled generation*, which guarantees that output generated by the model always adheres to a specific schema so that you receive consistently formatted responses.\n", "\n", - "You can To specify the structure of a model's output, define a response schema, which works like a blueprint for model responses. When you submit a prompt and include the response schema, the model's response always follows your defined schema.\n", - "\n", "Previously, implementing multimodal function calling required two separate calls to the Gemini API: one to extract information from media, and another to generate a function call based on the extracted text. This process was cumbersome, prone to errors, and resulted in the loss of detail in valuable contextual information. Gemini's multimodal function calling capability streamlines this workflow, enabling a single API call that efficiently processes multimodal inputs for accurate function predictions and structured outputs. 
\n", "\n", "### How It Works\n", @@ -299,6 +297,8 @@ "\n", "In this example, you'll send along an image of a bird and ask Gemini to identify its habitat. This involves defining a function that looks up regions where a given animal is found, creating a tool that uses this function, and then sending a request to Gemini.\n", "\n", + "\n", + "\n", "First, you define a `FunctionDeclaration` called `get_wildlife_region`. This function takes the name of an animal species as input and returns information about its typical region." ] }, @@ -586,6 +586,8 @@ "source": [ "Now let's explore how Gemini can extract information from videos for the purpose of invoking a function call. You'll use a video showcasing multiple products and ask Gemini to identify its key features.\n", "\n", + "\n", + "\n", "Start by defining a function called `get_feature_info` that takes a list of product features as input and could potentially be used to retrieve additional details about those features:" ] }, @@ -814,6 +816,13 @@ "source": [ "In this example, you'll explore using audio input with Gemini's multimodal function calling. You'll send a podcast episode to Gemini and ask for book recommendations related to the topics discussed.\n", "\n", + ">>> \"SRE is just a production system specific manifestation of systems thinking ... and we kind of do it in an informal way.\"
\n", + ">>> \"The book called 'Thinking in Systems' ... it's a really good primer on this topic.\"
\n", + ">>> \"An example of ... systems structure behavior thinking ... is the idea of like the cascading failure, that kind of vicious cycle of load that causes retries that causes more load ... \"
\n", + ">>> \"The worst pattern is the single embedded SRE that turns into the ops person ... you just end up doing all of the toil, all of the grunt work.\"
\n", + ">>> \"Take that moment, take a breath, and really analyze the problem and understand how it's working as a system and understand how you can intervene to improve that.\"
\n", + ">>> \"Avoid just doing what you've done before and kicking the can down the road, and really think deeply about your problems.\"
\n", + "\n", "Define a function called `get_recommended_books` that takes a list of topics as input and (hypothetically) returns relevant book recommendations:" ] }, @@ -1026,6 +1035,8 @@ "source": [ "This example demonstrates how to use Gemini's multimodal function calling to process PDF documents. You'll work with a set of invoices and extract the names of the (fictitious) companies involved.\n", "\n", + "\n", + "\n", "Define a function called `get_company_information` that (in a real-world scenario) could be used to fetch details about a given list of companies:" ] }, @@ -1254,6 +1265,8 @@ "source": [ "Let's put it all together and build a simple multimodal chatbot. This chatbot will understand image inputs and respond to questions using the functions you define.\n", "\n", + "\n", + "\n", "First, define three functions: `get_animal_details`, `get_location_details`, and `check_color_palette`. These functions represent the capabilities of your chatbot and could potentially be used to retrieve additional details using REST API calls:" ] }, From 9fd8ea58faede85271cce18ac25baf792474c8a8 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 30 Sep 2024 17:11:35 +0200 Subject: [PATCH 25/76] chore(deps): update dependency faker to v30 (#1178) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [faker](https://redirect.github.com/joke2k/faker) ([changelog](https://redirect.github.com/joke2k/faker/blob/master/CHANGELOG.md)) | `29.0.0` -> `30.0.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/faker/30.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/faker/30.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/faker/29.0.0/30.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/faker/29.0.0/30.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
joke2k/faker (faker) ### [`v30.0.0`](https://redirect.github.com/joke2k/faker/blob/HEAD/CHANGELOG.md#v3000---2024-09-25) [Compare Source](https://redirect.github.com/joke2k/faker/compare/v29.0.0...v30.0.0) - Force the slug always be generated with ASCII characters. Thanks [@​Pandede](https://redirect.github.com/Pandede).
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). --------- Co-authored-by: Eric Dong Co-authored-by: Owl Bot --- gemini/sample-apps/llamaindex-rag/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gemini/sample-apps/llamaindex-rag/pyproject.toml b/gemini/sample-apps/llamaindex-rag/pyproject.toml index c924966cd2..9d6d3e007b 100644 --- a/gemini/sample-apps/llamaindex-rag/pyproject.toml +++ b/gemini/sample-apps/llamaindex-rag/pyproject.toml @@ -59,7 +59,7 @@ dulwich = "0.21.7" email-validator = "2.2.0" entrypoints = "0.4" exceptiongroup = "1.2.2" -faker = "29.0.0" +faker = "30.0.0" fastapi = "0.111.1" fastapi-cli = "0.0.4" fastjsonschema = "2.20.0" From cb854717643d2f2ed008d0c25958b91c2f984111 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 30 Sep 2024 17:21:39 +0200 Subject: [PATCH 26/76] chore(deps): update dependency @types/express to v5 (#1176) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [@types/express](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/express) ([source](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/express)) | [`^4.17.21` -> `^5.0.0`](https://renovatebot.com/diffs/npm/@types%2fexpress/4.17.21/5.0.0) | 
[![age](https://developer.mend.io/api/mc/badges/age/npm/@types%2fexpress/5.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@types%2fexpress/5.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@types%2fexpress/4.17.21/5.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@types%2fexpress/4.17.21/5.0.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). 
Co-authored-by: Eric Dong --- .../genwealth/api/package-lock.json | 18 ++++++++++-------- gemini/sample-apps/genwealth/api/package.json | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/gemini/sample-apps/genwealth/api/package-lock.json b/gemini/sample-apps/genwealth/api/package-lock.json index 354b3fa705..6cc7460305 100644 --- a/gemini/sample-apps/genwealth/api/package-lock.json +++ b/gemini/sample-apps/genwealth/api/package-lock.json @@ -21,7 +21,7 @@ }, "devDependencies": { "@types/cors": "^2.8.17", - "@types/express": "^4.17.21", + "@types/express": "^5.0.0", "@types/lodash": "^4.17.0", "@types/multer": "^1.4.11", "@types/pg": "^8.11.2", @@ -1048,22 +1048,24 @@ } }, "node_modules/@types/express": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", - "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.0.tgz", + "integrity": "sha512-DvZriSMehGHL1ZNLzi6MidnsDhUZM/x2pRdDIKdwbUNqqwHxMlRdkxtn6/EPKyqKpHqTl/4nRZsRNLpZxZRpPQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.33", + "@types/express-serve-static-core": "^5.0.0", "@types/qs": "*", "@types/serve-static": "*" } }, "node_modules/@types/express-serve-static-core": { - "version": "4.17.43", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.43.tgz", - "integrity": "sha512-oaYtiBirUOPQGSWNGPWnzyAFJ0BP3cwvN4oWZQY+zUBwpVIGsKUkpBpSztp74drYcjavs7SKFZ4DX1V2QeN8rg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.0.0.tgz", + "integrity": "sha512-AbXMTZGt40T+KON9/Fdxx0B2WK5hsgxcfXJLr5bFpZ7b4JCex2WyQPTEKdXqfHiY5nKKBScZ7yCoO6Pvgxfvnw==", "dev": true, + "license": "MIT", "dependencies": { 
"@types/node": "*", "@types/qs": "*", diff --git a/gemini/sample-apps/genwealth/api/package.json b/gemini/sample-apps/genwealth/api/package.json index d7a24e4428..e348a5e71c 100644 --- a/gemini/sample-apps/genwealth/api/package.json +++ b/gemini/sample-apps/genwealth/api/package.json @@ -23,7 +23,7 @@ }, "devDependencies": { "@types/cors": "^2.8.17", - "@types/express": "^4.17.21", + "@types/express": "^5.0.0", "@types/lodash": "^4.17.0", "@types/multer": "^1.4.11", "@types/pg": "^8.11.2", From c550eba589654999aa962843bb9455f9836ae10b Mon Sep 17 00:00:00 2001 From: Jorj Ismailyan <130348293+jismailyan-google@users.noreply.github.com> Date: Mon, 30 Sep 2024 08:38:21 -0700 Subject: [PATCH 27/76] feat: Add the notebook tutorial for Vertex Image Segmentation (#1152) # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Holt Skinner Co-authored-by: Owl Bot Co-authored-by: Eric Dong --- .github/actions/spelling/allow.txt | 8 + .../getting-started/image_segmentation.ipynb | 826 ++++++++++++++++++ 2 files changed, 834 insertions(+) create mode 100644 vision/getting-started/image_segmentation.ipynb diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 3c4bb080ba..160a5dd119 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -300,11 +300,13 @@ baxis bbc bigquery bitcoin +boundings bpa bqml carbonara caudatus caxis +cctv cfbundle chatbots claude @@ -346,6 +348,7 @@ drinkware dropdown dropna dsl +dtype dtypes dwmapi ecommerce @@ -368,6 +371,7 @@ figsize fillmode firestore flac +floormat fmeasure fontdict forno @@ -378,6 +382,7 @@ fromiter fts fulltext funtion +gapic gboolean gchar gcloud @@ -422,6 +427,7 @@ idk idks idxs iloc +imagefont imageno imdb imshow @@ -529,6 +535,7 @@ projectid protobuf pstotext pubspec +putalpha putdata pvc pyautogen @@ -569,6 +576,7 @@ siglap sittin sklearn sku +snowboard sourced srlimit ssd diff --git a/vision/getting-started/image_segmentation.ipynb b/vision/getting-started/image_segmentation.ipynb new file mode 100644 index 0000000000..e0c062d1e4 --- /dev/null +++ b/vision/getting-started/image_segmentation.ipynb @@ -0,0 +1,826 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uxCkB_DXTHzf" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed 
on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hny4I-ODTIS6" + }, + "source": [ + "# Image Segmentation on Vertex AI\n", + "\n", + "\n", + "
\n", - " \n", + " \n", " \"Google
Open in Colab\n", "
\n", "
\n", - " \n", + " \n", " \"Google
Open in Colab Enterprise\n", "
\n", "
\n", - " \n", + " \n", " \"Vertex
Open in Workbench\n", "
\n", "
\n", - " \n", + " \n", " \"GitHub
View on GitHub\n", "
\n", "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XHQ6zSOLt102" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "|Author | [Jorj Ismailyan](https://github.com/jismailyan-google) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-nLS57E2TO5y" + }, + "source": [ + "## Overview\n", + "\n", + "Vertex Image Segmentation brings Google's state of the art segmentation models to developers as a scalable and reliable service.\n", + "\n", + "With Image Segmentation, developers can choose from five different modes to segment images and build AI products, including with a **text prompt** and **interactive** mode.\n", + "\n", + "Learn more about [Image Segmentation on Vertex](https://docs.google.com/document/d/1y5H_m29zGM3Xt6ba2lMw_di6bpbvtQagpU-xY30Kx78/edit?resourcekey=0-_-4WVkfl0oS3nfBwIEhWWQ&tab=t.0).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iXsvgIuwTPZw" + }, + "source": [ + "### Objectives\n", + "\n", + "In this notebook, you will be exploring the features of Vertex Image Segmentation using the Vertex AI Python SDK. You will\n", + "\n", + "- Segment the foreground or background of an object\n", + " - Create a product image by removing the background\n", + " - Change the background color of an image\n", + "- Control the generated mask by configuring dilation\n", + "- Use an open-vocabulary text prompt to perform:\n", + " - Object detection\n", + " - Instance segmentation\n", + "- Draw a scribble to guide segmentation\n", + " - Perform point-to-mask segmentation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "skXAu__iqks_" + }, + "source": [ + "### Costs\n", + "\n", + "- This notebook uses billable components of Google Cloud:\n", + " - Vertex AI\n", + "\n", + "- Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mvKl-BtQTRiQ" + }, + "source": [ + "## Getting Started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "B-UOCMvJdmlq" + }, + "source": [ + "### Install Vertex AI SDK for Python (Jupyter only)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "u5lOntr-doIT" + }, + "outputs": [], + "source": [ + "! pip3 install --upgrade --user google-cloud-aiplatform" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2tuaVGUJdsMm" + }, + "source": [ + "### Restart runtime (Jupyter only)\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XAp-TR9mdqw2" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "opUxT_k5TdgP" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you are running this notebook on Google Colab, run the following cell to authenticate your environment. This step is not required if you are using [Vertex AI Workbench](https://cloud.google.com/vertex-ai-workbench)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vbNgv4q1T2Mi" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "# Additional authentication is required for Google Colab\n", + "if \"google.colab\" in sys.modules:\n", + " # Authenticate user to Google Cloud\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ybBXSukZkgjg" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and enable the Vertex AI API.\n", + "\n", + "Learn more about setting up a project and a development environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "q7YvbXXdtzDT" + }, + "outputs": [], + "source": [ + "from google.cloud import aiplatform\n", + "from google.cloud.aiplatform.gapic import PredictResponse\n", + "\n", + "PROJECT_ID = \"\" # @param {type:\"string\"}\n", + "LOCATION = \"us-central1\" # @param [\"asia-northeast1\", \"asia-northeast3\", \"asia-southeast1\", \"europe-west1\", \"europe-west2\", \"europe-west3\", \"europe-west4\", \"europe-west9\", \"northamerica-northeast1\", \"us-central1\", \"us-east4\", \"us-west1\", \"us-west4\"]\n", + "\n", + "aiplatform.init(project=PROJECT_ID, location=LOCATION)\n", + "\n", + "api_regional_endpoint = f\"{LOCATION}-aiplatform.googleapis.com\"\n", + "client_options = {\"api_endpoint\": api_regional_endpoint}\n", + "client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)\n", + "\n", + "model_endpoint = f\"projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/image-segmentation-001\"\n", + "print(f\"Prediction client initiated on project {PROJECT_ID} in {LOCATION}.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": 
"Ju_PctW22NUl" + }, + "outputs": [], + "source": [ + "import base64\n", + "\n", + "# @title Import libraries\n", + "# @markdown Run this cell before proceeding to import libraries and define utility functions.\n", + "import io\n", + "import random\n", + "import timeit\n", + "\n", + "from IPython.display import display\n", + "from PIL import Image, ImageDraw, ImageFont\n", + "import matplotlib.pyplot as plt\n", + "\n", + "\n", + "# Parses the generated mask bytes from the response prediction and converts it\n", + "# to an Image PIL object.\n", + "def prediction_to_mask_pil(prediction) -> Image:\n", + " encoded_mask_string = prediction[\"bytesBase64Encoded\"]\n", + " mask_bytes = base64.b64decode(encoded_mask_string)\n", + " mask_pil = Image.open(io.BytesIO(mask_bytes))\n", + " mask_pil.thumbnail((640, 640))\n", + " return mask_pil\n", + "\n", + "\n", + "# Extracts masks from the response and overlays them onto the base image.\n", + "def overlay(input_image: Image, response: PredictResponse) -> Image:\n", + " # Make the original image colors grayscale so the overlayed masks are easier to see\n", + " overlayed_image = INPUT_IMAGE_PIL.copy().convert(\"L\").convert(\"RGB\")\n", + "\n", + " for prediction in response.predictions:\n", + " mask_pil = prediction_to_mask_pil(prediction)\n", + "\n", + " # Gives the mask a distinct color and makes the background transparent\n", + " color = (\n", + " random.randint(0, 255),\n", + " random.randint(0, 255),\n", + " random.randint(0, 255),\n", + " 128,\n", + " )\n", + " colored_mask = Image.new(\"RGBA\", mask_pil.size, color)\n", + " colored_mask = Image.composite(\n", + " colored_mask, Image.new(\"RGBA\", mask_pil.size), mask_pil\n", + " )\n", + "\n", + " # Pastes the colored mask onto the result image\n", + " overlayed_image.paste(colored_mask, (0, 0), colored_mask)\n", + "\n", + " return overlayed_image\n", + "\n", + "\n", + "# Displays a PIL image horizontally next to a generated mask from the response.\n", + "def 
display_horizontally(\n", + " input_images: list, mask_index: int = -1, figsize: tuple[int, int] = (15, 15)\n", + "):\n", + " count = len(input_images)\n", + " fig, ax = plt.subplots(1, count, figsize=figsize)\n", + "\n", + " for i in range(count):\n", + " cmap = \"gray\" if i == mask_index else None\n", + " ax[i].imshow(input_images[i], cmap)\n", + " ax[i].axis(\"off\")\n", + "\n", + " plt.show()\n", + "\n", + "\n", + "# Generates a transparent PNG image from an input image and its generated mask.\n", + "def generate_transparent_image(image_pil: str, mask_pil: Image) -> Image:\n", + " transparent_image = Image.new(\"RGBA\", image_pil.size, (128, 128, 128, 255))\n", + "\n", + " transparent_image.paste(image_pil, mask=mask_pil)\n", + " transparent_image.putalpha(mask_pil)\n", + " return transparent_image\n", + "\n", + "\n", + "def draw_bounding_boxes(base_image: Image, response: PredictResponse):\n", + " bbox_image = base_image.copy()\n", + " labeled_boxes = get_labeled_boxes(response)\n", + " color = \"red\"\n", + " draw = ImageDraw.Draw(bbox_image)\n", + " for box in labeled_boxes:\n", + " bounding_box = box[2]\n", + " draw.rectangle(bounding_box, outline=color, width=2)\n", + "\n", + " font = ImageFont.load_default_imagefont()\n", + " text_label = f\"{box[0]}: {box[1]}\"\n", + " text_width = draw.textlength(text_label, font=font) + 3 # Add 2 for padding\n", + " text_height = 12\n", + " label_x = bounding_box[0]\n", + " label_y = bounding_box[1] - text_height - 2 # Position label above the box\n", + "\n", + " # Draw a filled rectangle as the background for the label\n", + " draw.rectangle(\n", + " (label_x, label_y, label_x + text_width, label_y + text_height),\n", + " fill=color,\n", + " )\n", + " draw.text((label_x + 2, label_y), text_label, fill=\"white\", font=font)\n", + "\n", + " return bbox_image\n", + "\n", + "\n", + "def get_prediction_top_label(prediction) -> str:\n", + " # Labels returned on a single prediction are sorted by score.\n", + " label = 
prediction[\"labels\"][0][\"label\"]\n", + " score = prediction[\"labels\"][0][\"score\"]\n", + " return label, score\n", + "\n", + "\n", + "# Calculates the bounding box of the masked area in a mask image.\n", + "def get_bounding_box(mask: Image) -> tuple | None:\n", + " mask_array = mask.convert(\"1\").getdata()\n", + " width, height = mask.size\n", + " x1, y1, x2, y2 = width, height, 0, 0\n", + "\n", + " for y in range(height):\n", + " for x in range(width):\n", + " if mask_array[y * width + x]: # If pixel is white\n", + " x1 = min(x1, x)\n", + " y1 = min(y1, y)\n", + " x2 = max(x2, x)\n", + " y2 = max(y2, y)\n", + "\n", + " if x1 > x2 or y1 > y2:\n", + " return None # No masked area found\n", + " else:\n", + " return (x1, y1, x2 + 1, y2 + 1) # Add 1 to include the last pixel\n", + "\n", + "\n", + "def get_labeled_boxes(response: PredictResponse) -> list:\n", + " labeled_boxes = []\n", + " for prediction in response.predictions:\n", + " mask_pil = prediction_to_mask_pil(prediction)\n", + " bounding_box = get_bounding_box(mask_pil)\n", + " if bounding_box:\n", + " label, score = get_prediction_top_label(prediction)\n", + " score = round(float(score), 3)\n", + " labeled_box = (label, score, bounding_box)\n", + " labeled_boxes.append(labeled_box)\n", + "\n", + " return labeled_boxes\n", + "\n", + "\n", + "# Constructs a Vertex AI PredictRequest and uses it to call Image Segmentation.\n", + "def call_vertex_image_segmentation(\n", + " image_bytes=None,\n", + " gcs_uri=None,\n", + " mime_type=None,\n", + " mode=\"foreground\",\n", + " prompt=None,\n", + " scribble_bytes=None,\n", + " mask_dilation=None,\n", + " max_predictions=None,\n", + " confidence_threshold=None,\n", + "):\n", + " instances = []\n", + "\n", + " if image_bytes:\n", + " instances.append(\n", + " {\n", + " \"image\": {\n", + " \"bytesBase64Encoded\": image_bytes,\n", + " \"mimeType\": mime_type,\n", + " },\n", + " }\n", + " )\n", + " elif gcs_uri:\n", + " instances.append(\n", + " {\n", + " 
\"image\": {\"gcsUri\": gcs_uri},\n", + " }\n", + " )\n", + "\n", + " if scribble_bytes:\n", + " instances[0][\"scribble\"] = {\n", + " \"image\": {\n", + " \"bytesBase64Encoded\": scribble_bytes,\n", + " \"mimeType\": \"image/png\",\n", + " },\n", + " }\n", + "\n", + " if prompt:\n", + " instances[0][\"prompt\"] = prompt\n", + "\n", + " parameters = {\"mode\": mode}\n", + "\n", + " if mask_dilation:\n", + " parameters[\"maskDilation\"] = mask_dilation\n", + " if max_predictions:\n", + " parameters[\"maxPredictions\"] = max_predictions\n", + " if confidence_threshold:\n", + " parameters[\"confidenceThreshold\"] = confidence_threshold\n", + "\n", + " start = timeit.default_timer()\n", + " response = client.predict(\n", + " endpoint=model_endpoint, instances=instances, parameters=parameters\n", + " )\n", + " end = timeit.default_timer()\n", + " print(f\"Vertex Image Segmentation took {end - start:.2f}s.\")\n", + "\n", + " return response" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R45VRKWInfQQ" + }, + "source": [ + "## Select an image to segment\n", + "\n", + "Run this cell to enable and select the `Choose files` button.\n", + "You can then select an image file from your local device to upload.\n", + "Large images are resized to a maximum dimension of 640 pixels for faster processing." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "I9caIrZ7Dek1" + }, + "outputs": [], + "source": [ + "from google.colab import files\n", + "\n", + "images = files.upload()\n", + "RAW_IMAGE_BYTES = list(images.values())[0]\n", + "ENCODED_IMAGE_BYTES = base64.b64encode(RAW_IMAGE_BYTES).decode(\"utf-8\")\n", + "INPUT_IMAGE_PIL = Image.open(io.BytesIO(RAW_IMAGE_BYTES)).convert(\"RGB\")\n", + "INPUT_IMAGE_PIL.thumbnail((640, 640))\n", + "plt.axis(\"off\")\n", + "plt.imshow(INPUT_IMAGE_PIL)\n", + "\n", + "print(\n", + " f\"image size(with x height): {INPUT_IMAGE_PIL.size[0]} x {INPUT_IMAGE_PIL.size[1]}\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fU32286ooc8Q" + }, + "source": [ + "## Segment images using different modes\n", + "\n", + "You can generate image masks with different Image Segmentation features by setting the `mode` field to one of the available options:\n", + "* **Foreground**: Generate a mask of the segmented foreground of the image.\n", + "* **Background**: Generate a mask of the segmented background of the image.\n", + "* **Semantic**: Select the items in an image to segment from a set of 194 classes.\n", + "* **Prompt**: Use an open-vocabulary text prompt to guide the image segmentation.\n", + "* **Interactive**: Draw a rough mask to guide the model segmentation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mBLJtICO8iMQ" + }, + "source": [ + "### Foreground segmentation request\n", + "\n", + "This section will explores how to edit images using different `edit_mode` and `mask_mode` parameter options." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c9N8l0oo_cWs" + }, + "outputs": [], + "source": [ + "image_bytes = ENCODED_IMAGE_BYTES # Base64 encoded input image bytes\n", + "gcs_uri = None # gs:// path to the input image\n", + "mime_type = None # Image file type (JPEG, PNG, WEBP)\n", + "mode = \"foreground\" # Segmentation mode [foreground,background,semantic,prompt,interactive]\n", + "prompt = None # Prompt to guide segmentation for `semantic` and `prompt` modes\n", + "scribble_bytes = None # Input scribble for `interactive` segment mode\n", + "mask_dilation = (\n", + " None # Optional mask dilation for thin objects. Numeric value between 0 and 1.\n", + ")\n", + "max_predictions = (\n", + " None # Optional maximum predictions limit for prompt mode. Unlimited by default.\n", + ")\n", + "confidence_threshold = (\n", + " None # Optional confidence limit for prompt/background/foreground modes.\n", + ")\n", + "\n", + "response = call_vertex_image_segmentation(\n", + " image_bytes,\n", + " gcs_uri,\n", + " mime_type,\n", + " mode,\n", + " prompt,\n", + " scribble_bytes,\n", + " mask_dilation,\n", + " max_predictions,\n", + " confidence_threshold,\n", + ")\n", + "\n", + "MASK_PIL = prediction_to_mask_pil(response.predictions[0])\n", + "display_horizontally([INPUT_IMAGE_PIL, MASK_PIL], mask_index=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a5yLGWLwOVIJ" + }, + "source": [ + "#### Background removal\n", + "Use the foreground segmentation mask you created above to make the image background transparent." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-5K0CSb0twPO" + }, + "outputs": [], + "source": [ + "# Creates an empty transparent background.\n", + "transparent_background = Image.new(\"RGBA\", INPUT_IMAGE_PIL.size, (128, 128, 128, 255))\n", + "\n", + "# Uses the mask to cut and paste the foreground object in the original image\n", + "# onto the transparent background.\n", + "transparent_background.paste(INPUT_IMAGE_PIL, mask=MASK_PIL)\n", + "transparent_background.putalpha(MASK_PIL)\n", + "\n", + "display(transparent_background)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yEUZeTtyO01R" + }, + "source": [ + "#### Change background color" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "k-WiqiFJO448" + }, + "outputs": [], + "source": [ + "# RGBA color light blue\n", + "color = (141, 224, 254, 255)\n", + "input_image = INPUT_IMAGE_PIL.copy()\n", + "gray_background = Image.new(\"RGBA\", input_image.size, color)\n", + "gray_background.paste(input_image, mask=MASK_PIL)\n", + "\n", + "display(gray_background)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "--7rofOb95hT" + }, + "source": [ + "### Background segment mode\n", + "\n", + "Generate background masks." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JVtC3lFAGoAu" + }, + "outputs": [], + "source": [ + "response = call_vertex_image_segmentation(\n", + " image_bytes=ENCODED_IMAGE_BYTES,\n", + " gcs_uri=None,\n", + " mime_type=None,\n", + " mode=\"background\",\n", + " prompt=None,\n", + " scribble_bytes=None,\n", + " mask_dilation=None,\n", + " max_predictions=None,\n", + " confidence_threshold=None,\n", + ")\n", + "\n", + "MASK_PIL = prediction_to_mask_pil(response.predictions[0])\n", + "display_horizontally([INPUT_IMAGE_PIL, MASK_PIL], mask_index=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "U9pfcnNsGtcv" + }, + "source": [ + "### Semantic segment mode\n", + "\n", + "Specify the objects to segment from the set of 194 classes. The full set is available in the Appendix section at the end of this tutorial. You can specify multiple classes by delimiting with commas, e.g. `prompt=\"cat, dog\"`\n", + "\n", + "The semantic segmenter will return a single prediction containing the generated mask. If the classes in the prompt are detected, they are masked in white pixels and the background will be black. If the requested classes are not detected in the image, the whole mask will be black." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Aar3Pn3yG75T" + }, + "outputs": [], + "source": [ + "mode = \"semantic\"\n", + "prompt = \"motorcycle, bus\"\n", + "\n", + "response = call_vertex_image_segmentation(\n", + " image_bytes=ENCODED_IMAGE_BYTES,\n", + " gcs_uri=None,\n", + " mime_type=None,\n", + " mode=mode,\n", + " prompt=prompt,\n", + " scribble_bytes=None,\n", + " mask_dilation=None,\n", + " max_predictions=None,\n", + " confidence_threshold=None,\n", + ")\n", + "\n", + "MASK_PIL = prediction_to_mask_pil(response.predictions[0])\n", + "display_horizontally([INPUT_IMAGE_PIL, MASK_PIL], mask_index=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CO4q2sacIydg" + }, + "source": [ + "### Prompt instance segmentation mode\n", + "\n", + "You can use Prompt mode to perform detection and segmentation on many instances of your suggested objects. The response can generate multiple masks, along with one or more associated labels for each mask. Each label also contains an confidence score. Only objects matching labels specified in the request prompt are detected and segmented. The prompt is completely open-vocabulary, it is not limited to any class set.\n", + "\n", + "**Recommended**:\n", + "* Use the confidence_threshold and max_predictions parameters to filter and limit results\n", + "* You can request multiple items be detected by separating them with commas. Hundreds of classes can be set in a single prompt." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "unrHSqhqHmlI" + }, + "outputs": [], + "source": [ + "mode = \"prompt\"\n", + "prompt = \"green watermelon, cantaloupe, price tag\"\n", + "confidence_threshold = 0.1\n", + "max_predictions = None\n", + "\n", + "response = call_vertex_image_segmentation(\n", + " image_bytes=ENCODED_IMAGE_BYTES,\n", + " gcs_uri=None,\n", + " mime_type=None,\n", + " mode=mode,\n", + " prompt=prompt,\n", + " scribble_bytes=None,\n", + " mask_dilation=None,\n", + " max_predictions=max_predictions,\n", + " confidence_threshold=confidence_threshold,\n", + ")\n", + "\n", + "print(f\"Number of predictions is {str(len(response.predictions))}\")\n", + "\n", + "bbox_image = draw_bounding_boxes(INPUT_IMAGE_PIL, response)\n", + "overlayed_image = overlay(INPUT_IMAGE_PIL, response)\n", + "display_horizontally([INPUT_IMAGE_PIL, bbox_image, overlayed_image], figsize=(25, 25))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sk0eXjQ1nR4F" + }, + "source": [ + "## Conclusion\n", + "\n", + "You have explored the Vertex AI's Image Segmentation service and its features.\n", + "\n", + "Check out the Vertex AI reference to learn more about how to [Segment images](https://cloud.google.com/vertex-ai/generative-ai/docs/image/img-gen-prompt-guide#edit-prompts)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UuaLTarf-hvO" + }, + "source": [ + "## Appendix" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7ZzRAwQ0dIjT" + }, + "source": [ + "### Semantic segmentation classes\n", + "\n", + "| Class ID | Class ID | Class ID | Class ID |\n", + "| --- | --- | --- | --- |\n", + "| backpack | broccoli | road | mountain_hill |\n", + "| umbrella | carrot | snow | rock |\n", + "| bag | hot_dog | sidewalk_pavement | frisbee |\n", + "| tie | pizza | runway | skis |\n", + "| suitcase | donut | terrain | snowboard |\n", + "| case | cake | book | sports_ball |\n", + "| bird | fruit_other | box | kite |\n", + "| cat | food_other | clock | baseball_bat |\n", + "| dog | chair_other | vase | baseball_glove |\n", + "| horse | armchair | scissors | skateboard |\n", + "| sheep | swivel_chair | plaything_other | surfboard |\n", + "| cow | stool | teddy_bear | tennis_racket |\n", + "| elephant | seat | hair_dryer | net |\n", + "| bear | couch | toothbrush | base |\n", + "| zebra | trash_can | painting | sculpture |\n", + "| giraffe | potted_plant | poster | column |\n", + "| animal_other | nightstand | bulletin_board | fountain |\n", + "| microwave | bed | bottle | awning |\n", + "| radiator | table | cup | apparel |\n", + "| oven | pool_table | wine_glass | banner |\n", + "| toaster | barrel | knife | flag |\n", + "| storage_tank | desk | fork | blanket |\n", + "| conveyor_belt | ottoman | spoon | curtain_other |\n", + "| sink | wardrobe | bowl | shower_curtain |\n", + "| refrigerator | crib | tray | pillow |\n", + "| washer_dryer | basket | range_hood | towel |\n", + "| fan | chest_of_drawers | plate | rug_floormat |\n", + "| dishwasher | bookshelf | person | vegetation |\n", + "| toilet | counter_other | rider_other | bicycle |\n", + "| bathtub | bathroom_counter | bicyclist | car |\n", + "| shower | kitchen_island | motorcyclist | autorickshaw |\n", + "| tunnel | door | paper | motorcycle |\n", + "| bridge | 
light_other | streetlight | airplane |\n", + "| pier_wharf | lamp | road_barrier | bus |\n", + "| tent | sconce | mailbox | train |\n", + "| building | chandelier | cctv_camera | truck |\n", + "| ceiling | mirror | junction_box | trailer |\n", + "| laptop | whiteboard | traffic_sign | boat_ship |\n", + "| keyboard | shelf | traffic_light | slow_wheeled_object |\n", + "| mouse | stairs | fire_hydrant | river_lake |\n", + "| remote | escalator | parking_meter | sea |\n", + "| cell phone | cabinet | bench | water_other |\n", + "| television | fireplace | bike_rack | swimming_pool |\n", + "| floor | stove | billboard | waterfall |\n", + "| stage | arcade_machine | sky | wall |\n", + "| banana | gravel | pole | window |\n", + "| apple | platform | fence | window_blind |\n", + "| sandwich | playingfield | railing_banister | |\n", + "| orange | railroad | guard_rail | |\n" + ] + } + ], + "metadata": { + "colab": { + "name": "image_segmentation.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 146259f231a7b045185850b7ce3c13ef7879dde0 Mon Sep 17 00:00:00 2001 From: Jeff Nelson Date: Mon, 30 Sep 2024 10:29:28 -0700 Subject: [PATCH 28/76] feat: Adds BigQuery and BQML RAG notebook (#1189) # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .github/CODEOWNERS | 1 + .github/actions/spelling/allow.txt | 2 + .../rag_with_bigquery.ipynb | 875 ++++++++++++++++++ 3 files changed, 878 insertions(+) create mode 100644 gemini/use-cases/retrieval-augmented-generation/rag_with_bigquery.ipynb diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 61c41f5e48..1a5743e4c9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -72,3 +72,4 @@ /generative-ai/open-models/serving/cloud_run_ollama_gemma2_rag_qa.ipynb @eliasecchig @GoogleCloudPlatform/generative-ai-devrel /generative-ai/open-models/serving/vertex_ai_text_generation_inference_gemma.ipynb @alvarobartt @philschmid @pagezyhf @jeffboudier /generative-ai/gemini/use-cases/applying-llms-to-data/semantic-search-in-bigquery/stackoverflow_questions_semantic_search.ipynb @sethijaideep @GoogleCloudPlatform/generative-ai-devrel +/generative-ai/gemini/use-cases/retrieval-augmented-generation/raw_with_bigquery.ipynb @jeffonelson @GoogleCloudPlatform/generative-ai-devrel diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 160a5dd119..13d6ed4657 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -1,4 +1,5 @@ AFX +AGG AIP AMNOSH ANZ @@ -591,6 +592,7 @@ subviews subword supima sxs +tabular tagline tencel termcolor diff --git a/gemini/use-cases/retrieval-augmented-generation/rag_with_bigquery.ipynb b/gemini/use-cases/retrieval-augmented-generation/rag_with_bigquery.ipynb new file mode 100644 index 0000000000..caf6567c57 --- /dev/null +++ b/gemini/use-cases/retrieval-augmented-generation/rag_with_bigquery.ipynb @@ -0,0 +1,875 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may 
not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Run RAG Pipelines in BigQuery with BQML and Vector Search\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Jeff Nelson](https://github.com/jeffonelson/), Eric Hao |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## Overview\n", + "\n", + "This notebook demonstrates a basic end-to-end retrieval-augmented generation (RAG) pipeline using [BigQuery](https://cloud.google.com/bigquery/) and [BigQuery ML](https://cloud.google.com/bigquery/docs/bqml-introduction) functions. To do so, we:\n", + "\n", + "* Complete setup steps to download sample data and access [Vertex AI](https://cloud.google.com/vertex-ai) from BigQuery\n", + "* Generate [object table](https://cloud.google.com/bigquery/docs/object-table-introduction) to access unstructured PDFs that reside in [Cloud Storage](https://cloud.google.com/storage)\n", + "* Create a remote model, so BigQuery can call [Document AI](https://cloud.google.com/document-ai) to parse the PDF inputs\n", + "* Parse response from Document AI into chunks and metadata, then generate vector embeddings for the chunks\n", + "* Run a [vector search](https://cloud.google.com/bigquery/docs/vector-search) against embeddings in BigQuery, return relevant chunks, and summarize them with Gemini" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dc949afc1f08" + }, + "source": [ + "## How to open this notebook in BigQuery Studio\n", + "\n", + "This notebook was written to be compatible for use within BigQuery Studio. To open this notebook in BigQuery, click to [Run in Colab Enterprise](https://console.cloud.google.com/vertex-ai/colab/import/https:%2F%2Fraw.githubusercontent.com%2FGoogleCloudPlatform%2Fgenerative-ai%2Fmain%2Fgemini%2Fuse-cases%2Fretrieval-augmented-generation%2Frag_with_bigquery.ipynb). This will open a new window in the Cloud Console and prompt you to confirm import. 
Then, navigate to BigQuery, where you will find the notebook available in the Explorer pane under Notebooks." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5ba5c12e483d" + }, + "source": [ + "## About the dataset\n", + "\n", + "This example uses [Alphabet's 2023 10-K](https://abc.xyz/assets/43/44/675b83d7455885c4615d848d52a4/goog-10-k-2023.pdf) form. This is a detailed overview of the company's financial information and includes text, tables, and diagrams spanning nearly 100 pages." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2ce33dbc8fde" + }, + "source": [ + "## Services and Costs\n", + "\n", + "This tutorial uses the following Google Cloud data analytics and ML services, they are billable components of Google Cloud:\n", + "\n", + "* BigQuery & BigQuery ML [(pricing)](https://cloud.google.com/bigquery/pricing)\n", + "* Vertex AI Generative AI models [(pricing)](https://cloud.google.com/vertex-ai/generative-ai/pricing)\n", + "* Document AI [(pricing)](https://cloud.google.com/document-ai/pricing)\n", + "* Cloud Storage [(pricing)](https://cloud.google.com/storage/pricing)\n", + "\n", + "Use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "# Setup Steps to access Vertex AI models from BigQuery and enable APIs" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ff210a6d4d21" + }, + "source": [ + "### Install Document AI SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2e9e2b9e1b1f" + }, + "outputs": [], + "source": [ + "!pip install --quiet google-cloud-documentai==2.31.0" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8ed31279f009" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "567212ff53a6" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b96b39fd4d7b" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fa362c2ef5b5" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "9a07a9f9a4a9" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()\n", + " print(\"Authenticated\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Define your Google Cloud project" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"your-project-id\" # @param {type: \"string\"}\n", + "PROJECT_NUMBER = \"your-project-number\" # @param {type: \"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "04deeb11bbca" + }, + "source": [ + "### Enable Data Table Display\n", + "\n", + "This makes it easier to visualize tabular data within a Notebook environment later on." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "af9974f04f9f" + }, + "outputs": [], + "source": [ + "%load_ext google.colab.data_table" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b4256d07d596" + }, + "source": [ + "### Create a new dataset in BigQuery\n", + "\n", + "This will house any tables created throughout this notebook." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8a4c1a356d10" + }, + "outputs": [], + "source": [ + "!bq mk --location=us --dataset --project_id={PROJECT_ID} docai_demo" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a100b689816b" + }, + "source": [ + "### Create a Cloud resource connection\n", + "\n", + "[Cloud resource connections](https://cloud.google.com/bigquery/docs/create-cloud-resource-connection) enable BigQuery to access other Cloud services, like Cloud Storage and Vertex AI." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "885da43402b1" + }, + "outputs": [], + "source": [ + "!bq mk --connection --connection_type=CLOUD_RESOURCE --location=us --project_id={PROJECT_ID} \"demo_conn\"\n", + "!bq show --location=us --connection --project_id={PROJECT_ID} \"demo_conn\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dd9f6cbe4393" + }, + "source": [ + "### Add permissions to Cloud resource connection service account\n", + "\n", + "The Cloud resource connection is associated with a service account. The following cell enables the service account to access services like Document AI, Cloud Storage, and Vertex AI.\n", + "\n", + "**Note:** Copy the service account ID from the prior cell and input it below. It will look like `your-copied-service-account@gcp-sa-bigquery-condel.iam.gserviceaccount.com`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "16b193a840cd" + }, + "outputs": [], + "source": [ + "connection_service_account = \"your-copied-service-account@gcp-sa-bigquery-condel.iam.gserviceaccount.com\" # @param {type: \"string\"}\n", + "connection_member = f\"serviceAccount:{connection_service_account}\"\n", + "\n", + "\n", + "!gcloud projects add-iam-policy-binding {PROJECT_ID} --member={connection_member} --role='roles/documentai.viewer' --condition=None --quiet\n", + "!gcloud projects add-iam-policy-binding {PROJECT_ID} --member={connection_member} --role='roles/storage.objectViewer' --condition=None --quiet\n", + "!gcloud projects add-iam-policy-binding {PROJECT_ID} --member={connection_member} --role='roles/aiplatform.user' --condition=None --quiet" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ba09d9393559" + }, + "source": [ + "### Download the sample PDF used for this notebook and store it in a new Cloud Storage bucket" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4605453a6675" + }, + "outputs": [], + "source": [ + "import random\n", + "\n", + "# Create a unique Cloud Storage bucket name\n", + "bucket_name = f\"{PROJECT_ID}-{random.randint(10000, 99999)}\"\n", + "\n", + "# Create the bucket\n", + "!gsutil mb -l US -p {PROJECT_ID} gs://{bucket_name}\n", + "\n", + "# Download the PDF sample\n", + "!wget goog-10-k-2023.pdf \"https://www.abc.xyz/assets/43/44/675b83d7455885c4615d848d52a4/goog-10-k-2023.pdf\"\n", + "\n", + "# Upload the PDF sample to the newly created Cloud Storage bucket\n", + "!gsutil cp goog-10-k-2023.pdf gs://{bucket_name}/\n", + "\n", + "# Print confirmation\n", + "print(f\"PDF uploaded to gs://{bucket_name}/goog-10-k-2023.pdf\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6b0a0bd5c4fd" + }, + "source": [ + "## Create an object table\n", + "\n", + "An object table allows BigQuery to read unstructured data in 
Google Cloud Storage. This uses the BigQuery Python client library to continue using the `bucket_name` variable." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "94cc075094c4" + }, + "outputs": [], + "source": [ + "from google.cloud import bigquery\n", + "\n", + "client = bigquery.Client(project=PROJECT_ID)\n", + "\n", + "query = f\"\"\"\n", + "CREATE OR REPLACE EXTERNAL TABLE `docai_demo.object_table`\n", + "WITH CONNECTION `us.demo_conn` -- Replace with your connection ID\n", + "OPTIONS (\n", + " uris = ['gs://{bucket_name}/goog-10-k-2023.pdf'],\n", + " object_metadata = 'DIRECTORY'\n", + ");\n", + "\"\"\"\n", + "\n", + "query_job = client.query(query) # API request\n", + "query_job.result() # Waits for the query to complete\n", + "\n", + "print(\"External table docai_demo.object_table created or replaced successfully.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c17ec8736188" + }, + "source": [ + "### Show the object table\n", + "\n", + "Confirm that the results display the PDF document in your Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9f471aa348b2" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID\n", + "\n", + "SELECT * \n", + "FROM `docai_demo.object_table`;" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ec9d2c49fd34" + }, + "source": [ + "## Use BQML and Document AI to parse documents" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "afc0a5902ef3" + }, + "source": [ + "### Create a Layout Parser Processor in Document AI\n", + "\n", + "[Create a new processor](https://cloud.google.com/document-ai/docs/create-processor#documentai_fetch_processor_types-python) in Document AI with the type `LAYOUT_PARSER_PROCESSOR`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "519ea8a55496" + }, + "outputs": [], + "source": [ + "from google.api_core.client_options import ClientOptions\n", + "from google.cloud import documentai\n", + "\n", + "location = \"us\"\n", + "processor_display_name = \"layout_parser_processor\"\n", + "processor_type = \"LAYOUT_PARSER_PROCESSOR\"\n", + "\n", + "\n", + "def create_processor_sample(\n", + " PROJECT_ID: str, location: str, processor_display_name: str, processor_type: str\n", + ") -> None:\n", + " opts = ClientOptions(api_endpoint=f\"{location}-documentai.googleapis.com\")\n", + "\n", + " client = documentai.DocumentProcessorServiceClient(client_options=opts)\n", + "\n", + " # The full resource name of the location\n", + " parent = client.common_location_path(PROJECT_ID, location)\n", + "\n", + " # Create a processor\n", + " processor = client.create_processor(\n", + " parent=parent,\n", + " processor=documentai.Processor(\n", + " display_name=processor_display_name, type_=processor_type\n", + " ),\n", + " )\n", + "\n", + " # Return the processor ID needed for creating a BigQuery connection\n", + " return processor.name.split(\"/\")[-1]\n", + "\n", + "\n", + "# Call this function to create the processor and return its ID\n", + "processor_id = create_processor_sample(\n", + " PROJECT_ID, location, processor_display_name, processor_type\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5cfcaa4c4584" + }, + "source": [ + "### Create a remote model in BigQuery that connects with your Document AI Layout Parser Processor\n", + "\n", + "This one-time setup step allows BigQuery to reference the Document AI Processor you just created.\n", + "\n", + "**Note:** If you receive a 400 GET error \"permission denied for document processor\", you may need to wait a minute for permissions to propagate from earlier steps."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "57233716c232" + }, + "outputs": [], + "source": [ + "query = f\"\"\"\n", + "CREATE OR REPLACE MODEL `docai_demo.layout_parser` \n", + "REMOTE WITH CONNECTION `us.demo_conn`\n", + "OPTIONS(remote_service_type=\"CLOUD_AI_DOCUMENT_V1\", document_processor=\"{processor_id}\")\n", + "\"\"\"\n", + "\n", + "query_job = client.query(query) # API request\n", + "query_job.result() # Waits for the query to complete\n", + "\n", + "print(\"Remote model docai_demo.layout_parser created or replaced successfully.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fc821b6f845d" + }, + "source": [ + "### Process the document using BigQuery ML\n", + "\n", + "Use the [`ML.PROCESS_DOCUMENT` function](https://cloud.google.com/bigquery/docs/process-document) from BigQuery to call your Document AI processor and pass through the PDF. This uses the Layout Parser configuration and chunks your document.\n", + "\n", + "**Note:** this may take a minute or so to complete." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "a489b3cb9e1d" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID --location us\n", + "\n", + "CREATE or REPLACE TABLE docai_demo.demo_result AS (\n", + " SELECT * FROM ML.PROCESS_DOCUMENT(\n", + " MODEL docai_demo.layout_parser,\n", + " TABLE docai_demo.object_table,\n", + " PROCESS_OPTIONS => (JSON '{\"layout_config\": {\"chunking_config\": {\"chunk_size\": 250}}}')\n", + " )\n", + ");" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bfb0a1fa3266" + }, + "source": [ + "### Parse the JSON results returned to BigQuery\n", + "\n", + "The `ML.PROCESS_DOCUMENT` function parses the PDF from Cloud Storage and returns a JSON blob to BigQuery. In this step, we'll parse the JSON, extract document chunks and metadata, and return it to a new BigQuery table." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2bc4dad2e399" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID --location us\n", + "\n", + "CREATE OR REPLACE TABLE docai_demo.demo_result_parsed AS (\n", + "SELECT\n", + " uri,\n", + " JSON_EXTRACT_SCALAR(json , '$.chunkId') AS id,\n", + " JSON_EXTRACT_SCALAR(json , '$.content') AS content,\n", + " JSON_EXTRACT_SCALAR(json , '$.pageFooters[0].text') AS page_footers_text,\n", + " JSON_EXTRACT_SCALAR(json , '$.pageSpan.pageStart') AS page_span_start,\n", + " JSON_EXTRACT_SCALAR(json , '$.pageSpan.pageEnd') AS page_span_end\n", + "FROM docai_demo.demo_result, UNNEST(JSON_EXTRACT_ARRAY(ml_process_document_result.chunkedDocument.chunks, '$')) json\n", + ");" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "adca53cc55d8" + }, + "source": [ + "### Display the parsed document chunks\n", + "\n", + "Show a preview of the parsed results and metadata." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c60bcdc388c4" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID --location us\n", + "\n", + "SELECT *\n", + "FROM docai_demo.demo_result_parsed\n", + "ORDER BY id\n", + "LIMIT 5;" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a980e66443bc" + }, + "source": [ + "## Connect to Vertex AI embedding generation and Gemini access" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eae56fa8c74c" + }, + "source": [ + "### Connect to a text embedding model\n", + "\n", + "[Create a remote model](https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-create-remote-model) allowing BigQuery access to a text embedding model hosted in Vertex AI." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3c53a24e59a1" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID\n", + "\n", + "CREATE OR REPLACE MODEL `docai_demo.embedding_model` \n", + "REMOTE WITH CONNECTION `us.demo_conn` OPTIONS(endpoint=\"text-embedding-004\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "45d1ccc016c8" + }, + "source": [ + "### Generate embeddings\n", + "\n", + "Use the [`ML.GENERATE_EMBEDDING` function](https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-generate-embedding) in BigQuery to generate embeddings for all text chunks in the document." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "63bf77f48b8c" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID\n", + "\n", + "CREATE OR REPLACE TABLE `docai_demo.embeddings` AS\n", + "SELECT * FROM ML.GENERATE_EMBEDDING(\n", + " MODEL `docai_demo.embedding_model`,\n", + " TABLE `docai_demo.demo_result_parsed`\n", + ");" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e1ce3b78e01a" + }, + "source": [ + "### Connect to a Gemini LLM endpoint\n", + "\n", + "Create a remote model allowing BigQuery access to a Gemini foundation model hosted in Vertex AI." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7b760c54502e" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID\n", + "\n", + "CREATE OR REPLACE MODEL `docai_demo.gemini_flash` REMOTE\n", + "WITH CONNECTION `us.demo_conn` OPTIONS(endpoint=\"gemini-1.5-flash\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "879593c348c4" + }, + "source": [ + "## Run vector search, return results, and pass them to Gemini for text generation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2eb640b369a3" + }, + "source": [ + "### Sample BigQuery vector search\n", + "\n", + "Run a sample BigQuery vector search against your chunks. This query takes your text input, creates an embedding using the `ML.GENERATE_EMBEDDING` function, and then passes the embedding through to the [`VECTOR_SEARCH` function](https://cloud.google.com/bigquery/docs/reference/standard-sql/search_functions#vector_search). The results are the top ten chunks that are most semantically related to your input.\n", + "\n", + "In the search query below, the input text asks \"What was Alphabets revenue in 2023?\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cf9fa689905d" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID\n", + "\n", + "SELECT query.query, base.uri, base.id, base.content, distance\n", + " FROM\n", + " VECTOR_SEARCH( TABLE `docai_demo.embeddings`,\n", + " 'ml_generate_embedding_result',\n", + " (\n", + " SELECT\n", + " ml_generate_embedding_result,\n", + " content AS query\n", + " FROM\n", + " ML.GENERATE_EMBEDDING( MODEL `docai_demo.embedding_model`,\n", + " ( SELECT 'What was Alphabets revenue in 2023?' 
AS content)\n", + " ) \n", + " ),\n", + " top_k => 10,\n", + " OPTIONS => '{\"fraction_lists_to_search\": 0.01}') \n", + "ORDER BY distance DESC;" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "623765bd9154" + }, + "source": [ + "## Generate text augmented by vector search results\n", + "\n", + "This step builds upon the prior one - but instead of simply returning the top text chunks, it calls the `ML.GENERATE_TEXT` function to summarize them alongside the question we input.\n", + "\n", + "In this query you:\n", + "* **Retrieve** the closest chunks semantically using the `VECTOR_SEARCH` function (this is what was done in the prior query)\n", + "* **Augment** the Gemini LLM with this knowledge\n", + "* **Generate** a succinct answer using the `ML.GENERATE_TEXT` function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2f6f83f2eca7" + }, + "outputs": [], + "source": [ + "%%bigquery --project $PROJECT_ID\n", + "\n", + "SELECT\n", + " ml_generate_text_llm_result AS generated,\n", + " -- prompt -- Commented out, but please feel free to uncomment if you would like to see the full context passed to the Gemini model\n", + "FROM\n", + " ML.GENERATE_TEXT( MODEL `docai_demo.gemini_flash`,\n", + " (\n", + " SELECT\n", + " CONCAT( 'What is yearly revenue for Alphabet in the last three years? 
Use the context and mention the reference file used in the answer: ',\n", + " STRING_AGG(FORMAT(\"context: %s and reference: %s\", base.content, base.uri), ',\\n')) AS prompt,\n", + " FROM\n", + " VECTOR_SEARCH( TABLE \n", + " `docai_demo.embeddings`,\n", + " 'ml_generate_embedding_result',\n", + " (\n", + " SELECT\n", + " ml_generate_embedding_result,\n", + " content AS query\n", + " FROM\n", + " ML.GENERATE_EMBEDDING( MODEL `docai_demo.embedding_model`,\n", + " (\n", + " SELECT\n", + " 'Alphabets revenue' AS content\n", + " )\n", + " ) \n", + " ),\n", + " top_k => 10,\n", + " OPTIONS => '{\"fraction_lists_to_search\": 0.01}') \n", + " ),\n", + " STRUCT(512 AS max_output_tokens, TRUE AS flatten_json_output)\n", + " );\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2a4e033321ad" + }, + "source": [ + "# Cleaning up\n", + "\n", + "To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n", + "\n", + "Otherwise, you can delete the individual resources you created in this tutorial by uncommenting the below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1ab59128be6c" + }, + "outputs": [], + "source": [ + "#\n", + "# !bq rm -r -f $PROJECT_ID:docai_demo\n", + "# !bq rm --connection --project_id=$PROJECT_ID --location=us demo_conn\n", + "# !gsutil rm -r gs://{bucket_name}\n", + "#" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aefa89207b70" + }, + "source": [ + "# Wrap up\n", + "\n", + "This notebook demonstrates an example of how to achieve a basic end-to-end retrieval-augmented generation pipeline using BigQuery. 
It integrates BigQuery ML functions like `ML.PROCESS_DOCUMENT` to call Document AI and parse PDFs, `ML.GENERATE_EMBEDDING` to generate embeddings on text chunks and input queries, and `ML.GENERATE_TEXT` to provide a concise answer. It also uses the `VECTOR_SEARCH` function to identify similar text (using embeddings) in BigQuery using familiar SQL syntax.\n", + "\n", + "To learn more, check out our documentation on [BigQuery ML](https://cloud.google.com/bigquery/docs/bqml-introduction) and [BigQuery Vector Search](https://cloud.google.com/bigquery/docs/vector-search)." + ] + } + ], + "metadata": { + "colab": { + "name": "rag_with_bigquery.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 30f11700cd62fd1f88191a5ff2b6ac9b14a0c36a Mon Sep 17 00:00:00 2001 From: Alicia Williams Date: Mon, 30 Sep 2024 10:41:08 -0700 Subject: [PATCH 29/76] fix: Update poster_image_analysis.ipynb (#1175) Update text embedding model to supported 'text-multilingual-embedding-002' Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. - [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [ ] Appropriate docs were updated (if necessary) Co-authored-by: Eric Dong --- .../poster_image_analysis.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gemini/use-cases/applying-llms-to-data/analyze-poster-images-in-bigquery/poster_image_analysis.ipynb b/gemini/use-cases/applying-llms-to-data/analyze-poster-images-in-bigquery/poster_image_analysis.ipynb index 473146e31a..248413468f 100644 --- a/gemini/use-cases/applying-llms-to-data/analyze-poster-images-in-bigquery/poster_image_analysis.ipynb +++ b/gemini/use-cases/applying-llms-to-data/analyze-poster-images-in-bigquery/poster_image_analysis.ipynb @@ -452,7 +452,7 @@ "%%bigquery\n", "CREATE OR REPLACE MODEL `gemini_demo.text_embedding`\n", "REMOTE WITH CONNECTION `us.gemini_conn`\n", - "OPTIONS (endpoint = 'textembedding-gecko-multilingual')" + "OPTIONS (endpoint = 'text-multilingual-embedding-002')" ] }, { From 405b9de2988ed41266c49f2c6f9b9e04b298374a Mon Sep 17 00:00:00 2001 From: Jason Dai <44714045+jsondai@users.noreply.github.com> Date: Mon, 30 Sep 2024 15:35:09 -0700 Subject: [PATCH 30/76] feat: add public notebook for evaluating models in Vertex AI Studio and model garden (#1192) # Description feat: add public notebook for evaluating models in Vertex AI Studio and model garden chore: update introduction texts and titles for gen ai evaluation service tutorials --------- Co-authored-by: Owl Bot Co-authored-by: Eric Dong --- ...our_own_autorater_with_custom_metric.ipynb | 2 +- ...ng_your_own_computation_based_metric.ipynb | 2 +- ...pare_and_migrate_from_palm_to_gemini.ipynb | 2 +- .../compare_generative_ai_models.ipynb | 2 +- .../customize_model_based_metrics.ipynb | 2 +- ...te_and_compare_gemini_model_settings.ipynb | 10 +- .../evaluation/evaluate_gemini_tool_use.ipynb | 8 +- .../evaluate_langchain_chains.ipynb | 12 +- ...in_vertex_ai_studio_and_model_garden.ipynb | 2294 +++++++++++++++++ ...te_rag_gen_ai_evaluation_service_sdk.ipynb | 6 +- 
...tro_to_gen_ai_evaluation_service_sdk.ipynb | 2 +- .../migration_guide_preview_to_GA_sdk.ipynb | 5 +- ...eering_gen_ai_evaluation_service_sdk.ipynb | 6 +- 13 files changed, 2330 insertions(+), 23 deletions(-) create mode 100644 gemini/evaluation/evaluate_models_in_vertex_ai_studio_and_model_garden.ipynb diff --git a/gemini/evaluation/bring_your_own_autorater_with_custom_metric.ipynb b/gemini/evaluation/bring_your_own_autorater_with_custom_metric.ipynb index c8c947af54..3c701cd2a2 100644 --- a/gemini/evaluation/bring_your_own_autorater_with_custom_metric.ipynb +++ b/gemini/evaluation/bring_your_own_autorater_with_custom_metric.ipynb @@ -29,7 +29,7 @@ "id": "w-Edsbk7JRim" }, "source": [ - " # Bring-Your-Own-Autorater using `CustomMetric`\n", + " # Bring-Your-Own-Autorater using `CustomMetric` | Gen AI Evaluation SDK Tutorial\n", "\n", "\n", "\n", diff --git a/gemini/evaluation/bring_your_own_computation_based_metric.ipynb b/gemini/evaluation/bring_your_own_computation_based_metric.ipynb index f2c00e7080..a68d3ff0a6 100644 --- a/gemini/evaluation/bring_your_own_computation_based_metric.ipynb +++ b/gemini/evaluation/bring_your_own_computation_based_metric.ipynb @@ -30,7 +30,7 @@ "id": "kOCmYX5Rc3BZ" }, "source": [ - "# Bring your own computation-based `CustomMetric`\n", + "# Bring your own computation-based `CustomMetric` | Gen AI Evaluation SDK Tutorial\n", "\n", "\n", "
\n", diff --git a/gemini/evaluation/compare_and_migrate_from_palm_to_gemini.ipynb b/gemini/evaluation/compare_and_migrate_from_palm_to_gemini.ipynb index fc7a600795..b6febbb9a7 100644 --- a/gemini/evaluation/compare_and_migrate_from_palm_to_gemini.ipynb +++ b/gemini/evaluation/compare_and_migrate_from_palm_to_gemini.ipynb @@ -30,7 +30,7 @@ "id": "UzRB5KCb4Z94" }, "source": [ - "# Migrate from PaLM to Gemini model with Gen AI Evaluation Service SDK\n", + "# Migrate from PaLM to Gemini model | Gen AI Evaluation SDK Tutorial\n", "\n", "\n", "
\n", diff --git a/gemini/evaluation/compare_generative_ai_models.ipynb b/gemini/evaluation/compare_generative_ai_models.ipynb index b0551b0af7..995b75207a 100644 --- a/gemini/evaluation/compare_generative_ai_models.ipynb +++ b/gemini/evaluation/compare_generative_ai_models.ipynb @@ -29,7 +29,7 @@ "id": "QN61Ug4hLby5" }, "source": [ - " # Compare Generative AI Models\n", + " # Compare Generative AI Models | Gen AI Evaluation SDK Tutorial\n", "\n", "
\n", "
\n", diff --git a/gemini/evaluation/customize_model_based_metrics.ipynb b/gemini/evaluation/customize_model_based_metrics.ipynb index 4b4e6974b3..16f4364e36 100644 --- a/gemini/evaluation/customize_model_based_metrics.ipynb +++ b/gemini/evaluation/customize_model_based_metrics.ipynb @@ -30,7 +30,7 @@ "id": "kOCmYX5Rc3BZ" }, "source": [ - "# Customize Model-based Metrics to evaluate a Gen AI model\n", + "# Customize Model-based Metrics to Evaluate a Gen AI model | Gen AI Evaluation SDK Tutorial\n", "\n", "\n", "
\n", diff --git a/gemini/evaluation/evaluate_and_compare_gemini_model_settings.ipynb b/gemini/evaluation/evaluate_and_compare_gemini_model_settings.ipynb index 2abf74be66..74a645c972 100644 --- a/gemini/evaluation/evaluate_and_compare_gemini_model_settings.ipynb +++ b/gemini/evaluation/evaluate_and_compare_gemini_model_settings.ipynb @@ -29,7 +29,7 @@ "id": "JAPoU8Sm5E6e" }, "source": [ - "# Evaluate and Compare Gen AI Model Settings | Gen AI Evaluation Service SDK Tutorial\n", + "# Evaluate and Compare Gen AI Model Settings | Gen AI Evaluation SDK Tutorial\n", "\n", " \n", "
\n", @@ -74,9 +74,13 @@ "source": [ "## Overview\n", "\n", - "Evaluate and select generative AI model settings:\n", + "Evaluate and select generative AI model settings with *Vertex AI Python SDK for Gen AI Evaluation Service*:\n", "\n", - "* Adjust temperature, output token limit, safety settings and other model generation configurations of Gemini models on a summarization task and compare the evaluation results from different model settings on several metrics." + "* Adjust temperature, output token limit, safety settings and other model generation configurations of Gemini models on a summarization task and compare the evaluation results from different model settings on several metrics.\n", + "\n", + "See also: \n", + "\n", + "- Learn more about [Vertex Gen AI Evaluation Service SDK](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview)." ] }, { diff --git a/gemini/evaluation/evaluate_gemini_tool_use.ipynb b/gemini/evaluation/evaluate_gemini_tool_use.ipynb index ba7ca8f7df..be9070722c 100644 --- a/gemini/evaluation/evaluate_gemini_tool_use.ipynb +++ b/gemini/evaluation/evaluate_gemini_tool_use.ipynb @@ -29,7 +29,7 @@ "id": "WKiAD3qCCtOu" }, "source": [ - " # Evaluate Generative Model Tool Use | Gen AI Evaluation Service SDK Tutorial\n", + " # Evaluate Generative Model Tool Use | Gen AI Evaluation SDK Tutorial\n", "\n", " \n", "
\n", @@ -74,7 +74,11 @@ "source": [ "## Overview\n", "\n", - "* Define an API function and a Tool for Gemini model, and evaluate the Gemini tool use quality." + "* Define an API function and a Tool for Gemini model, and evaluate the Gemini model tool use quality with *Vertex AI Python SDK for Gen AI Evaluation Service*.\n", + "\n", + "See also: \n", + "\n", + "- Learn more about [Vertex Gen AI Evaluation Service SDK](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview)." ] }, { diff --git a/gemini/evaluation/evaluate_langchain_chains.ipynb b/gemini/evaluation/evaluate_langchain_chains.ipynb index ca94d90c62..d7f6653505 100644 --- a/gemini/evaluation/evaluate_langchain_chains.ipynb +++ b/gemini/evaluation/evaluate_langchain_chains.ipynb @@ -29,7 +29,7 @@ "id": "7ZX50cNFOFBt" }, "source": [ - " # Evaluate LangChain | Rapid Evaluation SDK Tutorial\n", + " # Evaluate LangChain | Gen AI Evaluation SDK Tutorial\n", "\n", " \n", "
\n", @@ -74,7 +74,7 @@ "source": [ "## Overview\n", "\n", - "With this tutorial, you learn how to evaluate the performance of a conversational LangChain chain using the Vertex AI Rapid Evaluation SDK. The notebook utilizes a dummy chatbot designed to provide recipe suggestions.\n", + "With this tutorial, you learn how to evaluate the performance of a conversational LangChain chain using the *Vertex AI Python SDK for Gen AI Evaluation Service*. The notebook utilizes a dummy chatbot designed to provide recipe suggestions.\n", "\n", "The tutorial goes trough:\n", "1. Data preparation\n", @@ -120,7 +120,7 @@ "source": [ "%pip install --quiet --upgrade nest_asyncio\n", "%pip install --upgrade --user --quiet langchain-core langchain-google-vertexai langchain\n", - "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[rapid_evaluation]\"" + "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[evaluation]\"" ] }, { @@ -245,11 +245,10 @@ "from google.cloud import aiplatform\n", "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_google_vertexai import ChatVertexAI\n", - "import nest_asyncio\n", - "import pandas as pd\n", - "from tqdm import tqdm\n", "\n", "# Main\n", + "import pandas as pd\n", + "from tqdm import tqdm\n", "import vertexai\n", "from vertexai.evaluation import CustomMetric, EvalTask\n", "\n", @@ -275,7 +274,6 @@ "outputs": [], "source": [ "logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.ERROR)\n", - "nest_asyncio.apply()\n", "warnings.filterwarnings(\"ignore\")" ] }, diff --git a/gemini/evaluation/evaluate_models_in_vertex_ai_studio_and_model_garden.ipynb b/gemini/evaluation/evaluate_models_in_vertex_ai_studio_and_model_garden.ipynb new file mode 100644 index 0000000000..03a114cd55 --- /dev/null +++ b/gemini/evaluation/evaluate_models_in_vertex_ai_studio_and_model_garden.ipynb @@ -0,0 +1,2294 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{ + "id": "bkIF-qKfOvFl" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KNc5B1-pOvFn" + }, + "source": [ + "# Use Gen AI Evaluation SDK to Evaluate Models in Vertex AI Studio, Model Garden, and Model Registry\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Run in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Run in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kPgYxQc1OvFn" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "|Author(s) | [Jason Dai](https://github.com/jsondai) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vQ_7WkHE3gNO" + }, + "source": [ + "This notebook demonstrates how to get started with using the *Vertex AI Python SDK for Gen AI Evaluation Service* for generative models in Vertex AI Studio, Model Garden, and Model Registry.\n", + "\n", + "Gen AI Evaluation Service empowers you to comprehensively assess and enhance your generative AI models and applications. Whether you're selecting the ideal model, optimizing prompt templates, or evaluating fine-tuned checkpoints, this service provides the tools and insights you need.\n", + "\n", + "In this Colab tutorial, we'll explore three major use cases:\n", + "\n", + "1. Run Evaluation on 1P Models\n", + " * Learn how to evaluate `Gemini` models in Vertex AI Studio using the *Gen AI Evaluation Service SDK*.\n", + "\n", + " * Explore different evaluation metrics and techniques for assessing performance on various tasks.\n", + "\n", + " * Discover how to leverage the SDK for in-depth analysis and comparison of `Gemini` model variants.\n", + "\n", + "\n", + "2. Run Evaluation on 3P Models\n", + " * Learn how to evaluate third-party open models, such as a pretrained `Llama 3.1` model, or a fine-tuned `Llama 3` model deployed in Vertex Model Garden, using the *Gen AI Evaluation Service SDK*.\n", + "\n", + " * Learn how to evaluate third-party closed model APIs, such as Anthropic's `Claude 3.5 Sonnet` model hosted on Vertex AI, using the *Gen AI Evaluation Service SDK*.\n", + "\n", + " * Gain insights into conducting controlled experiments by maintaining the same `EvalTask` configuration with fixed dataset and evaluation metrics while evaluating various model architectures and capabilities.\n", + "\n", + "\n", + "3. 
Prompt Engineering\n", + "\n", + " * Explore the impact of prompt design on model performance.\n", + " * Utilize the SDK to systematically evaluate and refine your prompts.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fHGZmbkw6GgM" + }, + "source": [ + "For additional use cases and advanced features, refer to our public documentation and notebook tutorials for evaluation use cases:\n", + "\n", + "* https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview#notebooks_for_evaluation_use_cases\n", + "\n", + "* https://cloud.google.com/vertex-ai/generative-ai/docs/models/run-evaluation\n", + "\n", + "Let's get started!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mN5IHo-aOvFo" + }, + "source": [ + "**_NOTE_**: This notebook has been tested in the following environment:\n", + "\n", + "* Python version = 3.10" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3XZf_4VEOvFo" + }, + "source": [ + "## Getting Started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kE20na1OOvFo" + }, + "source": [ + "### Install Vertex AI SDK for Gen AI Evaluation Service" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "abLuRgBzOvFp" + }, + "outputs": [], + "source": [ + "! pip install -U -q google-cloud-aiplatform[evaluation]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kYJVHBVSZgTX" + }, + "source": [ + "### Install other required packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "squkG3h9ZZs8" + }, + "outputs": [], + "source": [ + "! pip install -U -q datasets\n", + "! pip install -U -q anthropic[vertex]\n", + "! pip install -U -q openai" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Pe2lLnYuOvFp" + }, + "source": [ + "### Restart runtime\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. 
You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "B3FDZs3qOvFp" + }, + "outputs": [], + "source": [ + "# import IPython\n", + "\n", + "# app = IPython.Application.instance()\n", + "# app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LqDc-oyiOvFp" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "x1oLkh17OvFp" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9ygOCeYoOvFp" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8wyNclIAOvFp" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GTL_YzF9OvFq" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", + "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", + "EXPERIMENT_NAME = \"gen-ai-eval-experiment\" # @param {type:\"string\"}\n", + "\n", + "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n", + " raise ValueError(\"Please set your PROJECT_ID\")\n", + "\n", + "\n", + "import vertexai\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UBQgjn5wOvFq" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-TmyCxUSOvFq" + }, + "outputs": [], + "source": [ + "from anthropic import AnthropicVertex\n", + "from google.auth import default, transport\n", + "import openai\n", + "from vertexai.evaluation import (\n", + " EvalTask,\n", + " MetricPromptTemplateExamples,\n", + " PairwiseMetric,\n", + " PointwiseMetric,\n", + " PointwiseMetricPromptTemplate,\n", + ")\n", + "from vertexai.generative_models import GenerativeModel" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tfQ7sPtOjZOw" + }, + "source": [ + "### Library settings" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "RjWUgU1TjZOw" + }, + "outputs": [], + "source": [ + "import logging\n", + "import warnings\n", + "\n", + "import pandas as pd\n", + "\n", + "logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.ERROR)\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "# pd.set_option('display.max_colwidth', None)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F_Gw6YLeOvFq" + }, + "source": [ + "### Helper functions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "x8imb3UdOvFq" + }, + "outputs": [], + "source": [ + "import random\n", + "import string\n", + "\n", + "from IPython.display import HTML, Markdown, display\n", + "import plotly.graph_objects as go\n", + "\n", + "\n", + "def display_explanations(eval_result, metrics=None, n=1):\n", + " \"\"\"Display the explanations.\"\"\"\n", + " style = \"white-space: pre-wrap; width: 1500px; overflow-x: auto;\"\n", + " metrics_table = eval_result.metrics_table\n", + " df = metrics_table.sample(n=n)\n", + "\n", + " if metrics:\n", + " df = df.filter(\n", + " [\"response\", \"baseline_model_response\"]\n", + " + [\n", + " metric\n", + " for metric in df.columns\n", + " if any(selected_metric in metric for selected_metric in metrics)\n", + " ]\n", + " )\n", + " for index, row in df.iterrows():\n", + " for col in df.columns:\n", + " display(HTML(f\"

{col}:

{row[col]}
\"))\n", + " display(HTML(\"
\"))\n", + "\n", + "\n", + "def display_eval_result(eval_result, title=None, metrics=None):\n", + " \"\"\"Display the evaluation results.\"\"\"\n", + " summary_metrics, metrics_table = (\n", + " eval_result.summary_metrics,\n", + " eval_result.metrics_table,\n", + " )\n", + "\n", + " metrics_df = pd.DataFrame.from_dict(summary_metrics, orient=\"index\").T\n", + " if metrics:\n", + " metrics_df = metrics_df.filter(\n", + " [\n", + " metric\n", + " for metric in metrics_df.columns\n", + " if any(selected_metric in metric for selected_metric in metrics)\n", + " ]\n", + " )\n", + " metrics_table = metrics_table.filter(\n", + " [\n", + " metric\n", + " for metric in metrics_table.columns\n", + " if any(selected_metric in metric for selected_metric in metrics)\n", + " ]\n", + " )\n", + "\n", + " if title:\n", + " # Display the title with Markdown for emphasis\n", + " display(Markdown(f\"## {title}\"))\n", + " # Display the summary metrics DataFrame\n", + " display(Markdown(\"### Summary Metrics\"))\n", + " display(metrics_df)\n", + " # Display the metrics table DataFrame\n", + " display(Markdown(\"### Row-based Metrics\"))\n", + " display(metrics_table)\n", + "\n", + "\n", + "def display_radar_plot(eval_results, metrics=None):\n", + " \"\"\"Plot the radar plot.\"\"\"\n", + " fig = go.Figure()\n", + " for item in eval_results:\n", + " title, eval_result = item\n", + " summary_metrics = eval_result.summary_metrics\n", + " if metrics:\n", + " summary_metrics = {\n", + " k.replace(\"/mean\", \"\"): summary_metrics[k]\n", + " for k, v in summary_metrics.items()\n", + " if any(selected_metric + \"/mean\" in k for selected_metric in metrics)\n", + " }\n", + " fig.add_trace(\n", + " go.Scatterpolar(\n", + " r=list(summary_metrics.values()),\n", + " theta=list(summary_metrics.keys()),\n", + " fill=\"toself\",\n", + " name=title,\n", + " )\n", + " )\n", + " fig.update_layout(\n", + " polar=dict(radialaxis=dict(visible=True, range=[0, 5])), showlegend=True\n", + " )\n", + " 
fig.show()\n", + "\n", + "\n", + "def display_bar_plot(eval_results_list, metrics=None):\n", + " \"\"\"Plot the bar plot.\"\"\"\n", + " fig = go.Figure()\n", + " data = []\n", + "\n", + " for eval_results in eval_results_list:\n", + " title, eval_result = eval_results[0], eval_results[1]\n", + "\n", + " summary_metrics = eval_result.summary_metrics\n", + " mean_summary_metrics = [f\"{metric}/mean\" for metric in metrics]\n", + " updated_summary_metrics = []\n", + " if metrics:\n", + " for k, v in summary_metrics.items():\n", + " if k in mean_summary_metrics:\n", + " updated_summary_metrics.append((k, v))\n", + " summary_metrics = dict(updated_summary_metrics)\n", + " # summary_metrics = {k: summary_metrics[k] for k, v in summary_metrics.items() if any(selected_metric in k for selected_metric in metrics)}\n", + "\n", + " data.append(\n", + " go.Bar(\n", + " x=list(summary_metrics.keys()),\n", + " y=list(summary_metrics.values()),\n", + " name=title,\n", + " )\n", + " )\n", + "\n", + " fig = go.Figure(data=data)\n", + "\n", + " # Change the bar mode\n", + " fig.update_layout(barmode=\"group\", showlegend=True)\n", + " fig.show()\n", + "\n", + "\n", + "def generate_uuid(length: int = 8) -> str:\n", + " \"\"\"Generate a uuid of a specified length (default=8).\"\"\"\n", + " return \"\".join(random.choices(string.ascii_lowercase + string.digits, k=length))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gXkKKzk8OvFq" + }, + "source": [ + "## Load an evaluation dataset\n", + "\n", + "Load a subset of the `OpenOrca` dataset using the `huggingface/datasets` library.\n", + "\n", + "### Dataset Summary\n", + "\n", + "The OpenOrca dataset is a collection of augmented [FLAN Collection data](https://arxiv.org/abs/2301.13688). Currently ~1M GPT-4 completions, and ~3.2M GPT-3.5 completions. 
It is tabularized in alignment with the distributions presented in the ORCA paper and currently represents a partial completion of the full intended dataset, with ongoing generation to expand its scope. The data is primarily used for training and evaluation in the field of natural language processing.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "w-j7l4Qd0Ull" + }, + "outputs": [], + "source": [ + "from datasets import load_dataset\n", + "\n", + "ds = (\n", + " load_dataset(\n", + " \"Open-Orca/OpenOrca\",\n", + " data_files=\"1M-GPT4-Augmented.parquet\",\n", + " split=\"train[:100]\",\n", + " )\n", + " .to_pandas()\n", + " .drop(columns=[\"id\"])\n", + " .rename(columns={\"response\": \"reference\"})\n", + ")\n", + "\n", + "dataset = ds.sample(n=10)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kOeVlflg06F3" + }, + "source": [ + "#### Preview the dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zAHGlmkEelhm" + }, + "outputs": [], + "source": [ + "dataset.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kgrS1Ljo0VVQ" + }, + "source": [ + "## Run evaluation on 1P models\n", + "\n", + "The *Gen AI Evaluation Service SDK* includes native support for evaluating Gemini models. This streamlines the evaluation process for Google's latest and most capable family of large language models. With minimal coding effort, you can leverage pre-defined metrics and workflows to assess the performance of Gemini models on various tasks. 
You can also customize your own model-based metrics based on your specific evaluation criteria.\n", + "\n", + "This enhanced support enables you to:\n", + "\n", + "- **Quickly evaluate Gemini models:** Effortlessly assess the performance of Gemini models using the SDK's streamlined workflows.\n", + "- **Compare models side-by-side:** Benchmark Gemini against other models to understand relative strengths and weaknesses.\n", + "- **Analyze prompt templates:** Evaluate the effectiveness of different prompt designs for optimizing Gemini's performance.\n", + "\n", + "This native integration simplifies the evaluation process, allowing you to focus on understanding and improving the capabilities of Gemini." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "011966ad51c4" + }, + "source": [ + "#### Understand the `EvalTask` class\n", + "\n", + "The `EvalTask` class is a core component of the *Gen AI Evaluation Service SDK* framework. It allows you to define and run evaluation jobs against your Gen AI models/applications, providing a structured way to measure their performance on specific tasks. Think of an `EvalTask` as a blueprint for your evaluation process.\n", + "\n", + "`EvalTask` class requires an evaluation dataset and a list of metrics. Supported metrics are documented on the Generative AI on Vertex AI [Define your evaluation metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval) page. The dataset can be an `pandas.DataFrame`, Python dictionary or a file path URI and uses default column names such as \"prompt\", \"reference\", \"response\", and \"baseline_model_response\". \n", + "\n", + "* For bring-your-own-response (BYOR), the \"response\" column in the dataset is mandatory, and \"baseline_model_response\" is needed for pairwise metrics without a provided baseline model.\n", + "\n", + "* When a model is specified for generation, the \"prompt\" column in the dataset is required. 
Alternatively, when using a prompt template, the dataset needs columns matching the template's variables, and a \"prompt\" column can be assembled from those columns. \n", + "\n", + "\n", + "\n", + "`EvalTask` supports extensive evaluation scenarios including BYOR, model inference with Gemini models, 3P models endpoints/SDK clients, or custom generation functions, using prompt templates, custom-defined model-based metrics and pairwise metric side-by-side(SxS) comparisons. The .evaluate() method triggers the evaluation process, optionally taking a model, prompt template, experiment run name, and other evaluation run configurations. You can view the SDK reference documentation for [Gen AI Evaluation package](https://cloud.google.com/vertex-ai/generative-ai/docs/reference/python/latest/vertexai.evaluation) for more details." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "noHGa6rD6-ks" + }, + "source": [ + "### Define a model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ol1fa0hI7Irr" + }, + "outputs": [], + "source": [ + "# Model to be evaluated\n", + "model = GenerativeModel(\n", + " \"gemini-1.5-pro\",\n", + " generation_config={\"temperature\": 0.6, \"max_output_tokens\": 256, \"top_k\": 1},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xuvdzrUIayPp" + }, + "source": [ + "### Use computation-based metrics\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JrY-NLGg0TVS" + }, + "outputs": [], + "source": [ + "# Define an EvalTask with ROUGE-L-SUM metric\n", + "rouge_eval_task = EvalTask(\n", + " dataset=dataset,\n", + " metrics=[\"rouge_l_sum\"],\n", + " experiment=EXPERIMENT_NAME,\n", + ")\n", + "rouge_result = rouge_eval_task.evaluate(\n", + " model=model,\n", + " prompt_template=\"# System_prompt\\n{system_prompt} # Question\\n{question}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": 
"FSMRnDMixwLS" + }, + "outputs": [], + "source": [ + "display_eval_result(rouge_result)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gx9pKjiSbHag" + }, + "source": [ + "### Use model-based pointwise metrics" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jCmPRv4kHwPG" + }, + "source": [ + "#### Select a pointwise metric to use" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WuRlYbj0HvBB" + }, + "outputs": [], + "source": [ + "import ipywidgets as widgets\n", + "\n", + "pointwise_single_turn_metrics = [\n", + " metric\n", + " for metric in supported_example_metric_names\n", + " if not metric.startswith(\"pairwise\") and not metric.startswith(\"multi_turn\")\n", + "]\n", + "\n", + "dropdown = widgets.Dropdown(\n", + " options=pointwise_single_turn_metrics,\n", + " description=\"Select a metric:\",\n", + " font_weight=\"bold\",\n", + " style={\"description_width\": \"initial\"},\n", + ")\n", + "\n", + "\n", + "def dropdown_eventhandler(change):\n", + " global POINTWISE_METRIC\n", + " if change[\"type\"] == \"change\" and change[\"name\"] == \"value\":\n", + " POINTWISE_METRIC = change.new\n", + " print(\"Selected:\", change.new)\n", + "\n", + "\n", + "POINTWISE_METRIC = dropdown.value\n", + "dropdown.observe(dropdown_eventhandler, names=\"value\")\n", + "display(dropdown)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "kBnyzogabOmp" + }, + "outputs": [], + "source": [ + "# Define an EvalTask with a pointwise model-based metric\n", + "pointwise_eval_task = EvalTask(\n", + " dataset=dataset,\n", + " metrics=[POINTWISE_METRIC],\n", + " experiment=EXPERIMENT_NAME,\n", + ")\n", + "\n", + "pointwise_result = pointwise_eval_task.evaluate(\n", + " model=model,\n", + " prompt_template=\"# System_prompt\\n{system_prompt} # Question\\n{question}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0EwLaPusyeL9" + 
}, + "outputs": [], + "source": [ + "display_eval_result(pointwise_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qsPD-YUWoRYo" + }, + "outputs": [], + "source": [ + "display_explanations(pointwise_result, metrics=[POINTWISE_METRIC], n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9AzW8Zg71Igs" + }, + "source": [ + "#### Build your own pointwise metric\n", + "\n", + "For more inforamation about metric customization, see this [notebook tutorial](https://colab.research.google.com/github/GoogleCloudPlatform/generative-ai/blob/main/gemini/evaluation/customize_model_based_metrics.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZKVjolkE5qoh" + }, + "outputs": [], + "source": [ + "# Create a unique model-based metric for your own use cases\n", + "linguistic_acceptability = PointwiseMetric(\n", + " metric=\"linguistic_acceptability\",\n", + " metric_prompt_template=PointwiseMetricPromptTemplate(\n", + " criteria={\n", + " \"Proper Grammar\": \"The language's grammar rules are correctly followed, including but not limited to sentence structures, verb tenses, subject-verb agreement, proper punctuation, and capitalization.\",\n", + " \"Appropriate word choice\": \"Words chosen are appropriate and purposeful given their relative context and positioning in the text. 
Vocabulary demonstrates prompt understanding.\",\n", + " \"Reference Alignment\": \"The response is consistent and aligned with the reference.\",\n", + " },\n", + " rating_rubric={\n", + " \"5\": \"Excellent: The writing is grammatically correct, uses appropriate vocabulary and aligns perfectly with the reference.\",\n", + " \"4\": \"Good: The writing is generally grammatically correct, uses appropriate vocabulary and aligns well with the reference.\",\n", + " \"3\": \"Satisfactory: The writing may have minor grammatical errors or use less-appropriate vocabulary, but it aligns reasonably well with the reference.\",\n", + " \"2\": \"Unsatisfactory: The writing has significant grammatical errors, uses inappropriate vocabulary, deviates significantly from the reference.\",\n", + " \"1\": \"Poor: The writing is riddled with grammatical errors, uses highly inappropriate vocabulary, is completely unrelated to the reference.\",\n", + " },\n", + " input_variables=[\"prompt\", \"reference\"],\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1MJdLUJz6H5F" + }, + "outputs": [], + "source": [ + "pointwise_eval_task = EvalTask(\n", + " dataset=dataset,\n", + " metrics=[linguistic_acceptability],\n", + " experiment=EXPERIMENT_NAME,\n", + ")\n", + "\n", + "pointwise_result = pointwise_eval_task.evaluate(\n", + " model=model,\n", + " prompt_template=\"# System_prompt\\n{system_prompt} # Question\\n{question}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UW5DXIGv7oIJ" + }, + "outputs": [], + "source": [ + "display_eval_result(pointwise_result)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sFHRuWjv86uG" + }, + "source": [ + "### Use model-based pairwise metrics\n", + "\n", + "Evaluate two Gen AI models side-by-side (SxS) with model-based pairwise metrics." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8Rd0qIOrMBRb" + }, + "source": [ + "#### Select a pairwise metric to use" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "E7Eqh9e9MDsa" + }, + "outputs": [], + "source": [ + "from IPython.display import display\n", + "import ipywidgets as widgets\n", + "\n", + "pairwise_single_turn_metrics = [\n", + " metric\n", + " for metric in supported_example_metric_names\n", + " if metric.startswith(\"pairwise\") and \"multi_turn\" not in metric\n", + "]\n", + "\n", + "dropdown = widgets.Dropdown(\n", + " options=pairwise_single_turn_metrics,\n", + " description=\"Select a metric:\",\n", + " font_weight=\"bold\",\n", + " style={\"description_width\": \"initial\"},\n", + ")\n", + "\n", + "\n", + "def dropdown_eventhandler(change):\n", + " global pairwise_metric_name\n", + " if change[\"type\"] == \"change\" and change[\"name\"] == \"value\":\n", + " pairwise_metric = change.new\n", + " print(\"Selected:\", change.new)\n", + "\n", + "\n", + "pairwise_metric_name = dropdown.value\n", + "dropdown.observe(dropdown_eventhandler, names=\"value\")\n", + "display(dropdown)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yLTaArLre0TE" + }, + "source": [ + "#### Define a baseline model to compare against" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4u-WJVK2-SI5" + }, + "outputs": [], + "source": [ + "# Define a baseline model for pairwise comparison\n", + "baseline_model = GenerativeModel(\"gemini-1.5-flash-001\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rJd6sF7re5gC" + }, + "source": [ + "#### Run SxS evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_yoZrZsiM4Sn" + }, + "outputs": [], + "source": [ + "# Create a pairwise metric\n", + "PAIRWISE_METRIC = PairwiseMetric(\n", + " metric=pairwise_metric_name,\n", + " 
metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template(\n", + " pairwise_metric_name\n", + " ),\n", + " baseline_model=baseline_model,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sDAaYDqY9FBu" + }, + "outputs": [], + "source": [ + "pairwise_eval_task = EvalTask(\n", + " dataset=dataset,\n", + " metrics=[PAIRWISE_METRIC],\n", + " experiment=EXPERIMENT_NAME,\n", + ")\n", + "# Specify a candidate model for pairwise comparison\n", + "pairwise_result = pairwise_eval_task.evaluate(\n", + " model=model,\n", + " prompt_template=\"# System_prompt\\n{system_prompt} # Question\\n{question}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bPAoJu53-mfx" + }, + "outputs": [], + "source": [ + "display_eval_result(\n", + " pairwise_result,\n", + " title=\"Gemini-1.5-Flash vs. Gemini-1.5-Pro SxS Pairwise Evaluation Results\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "olbyG8ZVOvFq" + }, + "source": [ + "## Run evaluation on 3P Models\n", + "\n", + "The *Vertex Gen AI Evaluation Service SDK* provides robust support for evaluating third-party (3P) models, allowing you to assess and compare models from various sources. The *Gen AI Evaluation Service SDK* allows you to provide a generic Python function as input to specify how the model/application should be invoked for batch inference, which could be done through an endpoint or an SDK. This flexible approach accommodates a wide range of open and closed models.\n", + "\n", + "**Open Models:**\n", + "\n", + "Evaluate open models like a pre-trained `Llama 3.1` or a fine-tuned `Llama 3` models deployed with Vertex AI Model Garden using the *Gen AI Evaluation Service SDK*. 
This enables you to assess the performance of these models and understand how they align with your specific requirements.\n", + "\n", + "**Closed Models:**\n", + "\n", + "Evaluate the performance of closed model APIs, such as Anthropic's `Claude 3.5 Sonnet`, hosted on Vertex AI. This allows you to compare the capabilities of different closed models and make informed decisions about which best suits your needs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HXfM_yJwiuh3" + }, + "outputs": [], + "source": [ + "EXPERIMENT_NAME = \"gen-ai-eval-3p-experiment\" # @param {type:\"string\"}\n", + "\n", + "# Define an EvalTask with a list of metrics\n", + "pointwise_eval_task = EvalTask(\n", + " dataset=dataset,\n", + " metrics=[\n", + " \"coherence\",\n", + " \"fluency\",\n", + " \"instruction_following\",\n", + " \"text_quality\",\n", + " \"rouge_l_sum\",\n", + " linguistic_acceptability,\n", + " ],\n", + " experiment=EXPERIMENT_NAME,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UGokrtdiIHrX" + }, + "source": [ + "### Run Evaluation on Llama 3.1 API Endpoint\n", + "\n", + "You can experiment with various supported Llama models.\n", + "\n", + "This tutorial use Llama 3 8B Instruct, 70B Instruct, and 405B Instruct using Model-as-a-Service (MaaS). Using Model-as-a-Service (MaaS), you can access Llama 3.1 models in just a few clicks without any setup or infrastructure hassles. Model-as-a-Service (MaaS) integrates [Llama Guard](https://huggingface.co/meta-llama/Llama-Guard-3-8B) as a safety filter. It is switched on by default and can be switched off. Llama Guard enables us to safeguard model inputs and outputs. 
If a response is filtered, it will be populated with a `finish_reason` field (with value `content_filtered`) and a `refusal` field (stating the filtering reason).\n", + "\n", + "You can also access Llama models for self-service in Vertex AI Model Garden, allowing you to choose your preferred infrastructure. [Check out Llama 3.1 model card](https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama3_1?_ga=2.31261500.2048242469.1721714335-1107467625.1721655511) to learn how to deploy a Llama 3.1 models on Vertex AI." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "r7OhyH46H2H5" + }, + "outputs": [], + "source": [ + "MODEL_ID = \"meta/llama3-8b-instruct-maas\" # @param {type:\"string\"} [\"meta/llama3-8b-instruct-maas\", \"meta/llama3-70b-instruct-maas\", \"meta/llama3-405b-instruct-maas\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KUgMmggOa_TG" + }, + "source": [ + "#### Authentication\n", + "\n", + "You can request an access token from the default credentials for the current environment. Note that the access token lives for [1 hour by default](https://cloud.google.com/docs/authentication/token-types#at-lifetime); after expiration, it must be refreshed.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OT03q0Oqa86s" + }, + "outputs": [], + "source": [ + "credentials, _ = default()\n", + "auth_request = transport.requests.Request()\n", + "credentials.refresh(auth_request)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PtpB2ig9bCxs" + }, + "source": [ + "Then configure the OpenAI SDK to point to the Llama 3.1 API endpoint.\n", + "\n", + "Note: only `us-central1` is supported region for Llama 3.1 models using Model-as-a-Service (MaaS)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5TazdE-CbEdW" + }, + "outputs": [], + "source": [ + "MODEL_LOCATION = \"us-central1\"\n", + "\n", + "client = openai.OpenAI(\n", + " base_url=f\"https://{MODEL_LOCATION}-aiplatform.googleapis.com/v1beta1/projects/{PROJECT_ID}/locations/{MODEL_LOCATION}/endpoints/openapi/chat/completions?\",\n", + " api_key=credentials.token,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zMPNCDTsbqwc" + }, + "source": [ + "#### Set model configurations for Llama 3.1\n", + "\n", + "Use the following parameters to generate different answers:\n", + "\n", + "* `temperature` to control the randomness of the response\n", + "* `max_tokens` to limit the response length\n", + "* `top_p` to control the quality of the response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "VxQbGCHHbnot" + }, + "outputs": [], + "source": [ + "temperature = 1.0 # @param {type:\"number\"}\n", + "max_tokens = 50 # @param {type:\"integer\"}\n", + "top_p = 1.0 # @param {type:\"number\"}\n", + "apply_llama_guard = True # @param {type:\"boolean\"}\n", + "\n", + "response = client.chat.completions.create(\n", + " model=MODEL_ID,\n", + " messages=[{\"role\": \"user\", \"content\": \"Hello, Llama 3.1!\"}],\n", + " extra_body={\n", + " \"extra_body\": {\n", + " \"google\": {\n", + " \"model_safety_settings\": {\n", + " \"enabled\": apply_llama_guard,\n", + " \"llama_guard_settings\": {},\n", + " }\n", + " }\n", + " }\n", + " },\n", + ")\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wqoSsvMlcYPM" + }, + "source": [ + "#### Define the Llama 3.1 Model Function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "CdiP_zMWbzp0" + }, + "outputs": [], + "source": [ + "def llama_model_fn(prompt: str) -> str:\n", + " response = 
client.chat.completions.create(\n", + " model=MODEL_ID,\n", + " messages=[{\"role\": \"user\", \"content\": prompt}],\n", + " temperature=temperature,\n", + " max_tokens=max_tokens,\n", + " top_p=top_p,\n", + " extra_body={\n", + " \"extra_body\": {\n", + " \"google\": {\n", + " \"model_safety_settings\": {\n", + " \"enabled\": apply_llama_guard,\n", + " \"llama_guard_settings\": {},\n", + " }\n", + " }\n", + " }\n", + " },\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nmtUTC1jTZT2" + }, + "source": [ + "#### Run evaluation on Llama 3.1 API Service" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zEe6nnKNTZT2" + }, + "outputs": [], + "source": [ + "# Create a unique model-based metric for your own use cases\n", + "linguistic_acceptability = PointwiseMetric(\n", + " metric=\"linguistic_acceptability\",\n", + " metric_prompt_template=PointwiseMetricPromptTemplate(\n", + " criteria={\n", + " \"Proper Grammar\": \"The language's grammar rules are correctly followed, including but not limited to sentence structures, verb tenses, subject-verb agreement, proper punctuation, and capitalization.\",\n", + " \"Appropriate word choice\": \"Words chosen are appropriate and purposeful given their relative context and positioning in the text. 
Vocabulary demonstrates prompt understanding.\",\n", + " \"Reference Alignment\": \"The response is consistent and aligned with the reference.\",\n", + " },\n", + " rating_rubric={\n", + " \"5\": \"Excellent: The writing is grammatically correct, uses appropriate vocabulary and aligns perfectly with the reference.\",\n", + " \"4\": \"Good: The writing is generally grammatically correct, uses appropriate vocabulary and aligns well with the reference.\",\n", + " \"3\": \"Satisfactory: The writing may have minor grammatical errors or use less-appropriate vocabulary, but it aligns reasonably well with the reference.\",\n", + " \"2\": \"Unsatisfactory: The writing has significant grammatical errors, uses inappropriate vocabulary, deviates significantly from the reference.\",\n", + " \"1\": \"Poor: The writing is riddled with grammatical errors, uses highly inappropriate vocabulary, is completely unrelated to the reference.\",\n", + " },\n", + " input_variables=[\"prompt\", \"reference\"],\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1are_m2DTZT2" + }, + "outputs": [], + "source": [ + "llama_result = pointwise_eval_task.evaluate(\n", + " model=llama_model_fn,\n", + " prompt_template=\"# System_prompt\\n{system_prompt} # Question\\n{question}\",\n", + " experiment_run_name=f\"eval-llama-3-1-{generate_uuid()}\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gD3zu0LLTZT2" + }, + "outputs": [], + "source": [ + "display_eval_result(llama_result, title=\"Llama 3.1 API Service Evaluation Results\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BUWSXVToTZT2" + }, + "outputs": [], + "source": [ + "display_explanations(llama_result, n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qS2BpbzMIpZc" + }, + "source": [ + "### Run Evaluation on Claude 3\n" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "cellView": "form", + "id": "4j9taDaPJHy9" + }, + "outputs": [], + "source": [ + "MODEL = \"claude-3-5-sonnet@20240620\" # @param [\"claude-3-5-sonnet@20240620\", \"claude-3-opus@20240229\", \"claude-3-haiku@20240307\", \"claude-3-sonnet@20240229\" ]\n", + "if MODEL == \"claude-3-5-sonnet@20240620\":\n", + " available_regions = [\"europe-west1\", \"us-east5\"]\n", + "elif MODEL == \"claude-3-opus@20240229\":\n", + " available_regions = [\"us-east5\"]\n", + "elif MODEL == \"claude-3-haiku@20240307\":\n", + " available_regions = [\"us-east5\", \"europe-west1\"]\n", + "elif MODEL == \"claude-3-sonnet@20240229\":\n", + " available_regions = [\"us-east5\"]\n", + "\n", + "dropdown = widgets.Dropdown(\n", + " options=available_regions,\n", + " description=\"Select a location:\",\n", + " font_weight=\"bold\",\n", + " style={\"description_width\": \"initial\"},\n", + ")\n", + "\n", + "\n", + "def dropdown_eventhandler(change):\n", + " global LOCATION\n", + " if change[\"type\"] == \"change\" and change[\"name\"] == \"value\":\n", + " LOCATION = change.new\n", + " print(\"Selected:\", change.new)\n", + "\n", + "\n", + "LOCATION = dropdown.value\n", + "dropdown.observe(dropdown_eventhandler, names=\"value\")\n", + "display(dropdown)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lVbFk9pSd6L0" + }, + "source": [ + "#### Define the Claude 3 Model Function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jKLjm0o3KwEI" + }, + "outputs": [], + "source": [ + "def anthropic_claude_model_fn(prompt):\n", + " client = AnthropicVertex(region=LOCATION, project_id=PROJECT_ID)\n", + " message = client.messages.create(\n", + " max_tokens=1024,\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": f\"{prompt}\",\n", + " }\n", + " ],\n", + " model=MODEL,\n", + " )\n", + " response = message.content[0].text\n", + " return response" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "id": "N73OcARGgMUT" + }, + "source": [ + "#### Run evaluation on Claude 3 Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gdGUsc7gIpZi" + }, + "outputs": [], + "source": [ + "run_id = generate_uuid()\n", + "experiment_run_name = f\"eval-claude-3-{run_id}\"\n", + "\n", + "claude_result = pointwise_eval_task.evaluate(\n", + " model=anthropic_claude_model_fn,\n", + " prompt_template=\"# System_prompt\\n{system_prompt} # Question\\n{question}\",\n", + " experiment_run_name=experiment_run_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_URkaZhYIpZi" + }, + "outputs": [], + "source": [ + "display_eval_result(claude_result, title=\"Claude-3.5-Sonnet Evaluation Results\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "38IJUEXthXsE" + }, + "outputs": [], + "source": [ + "display_explanations(claude_result, n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y_7R1QLdgoCp" + }, + "source": [ + "### Visualize Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3rE3n7E9gyEo" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "eval_results = [(\"Llama 3.1\", llama_result), (\"Claude 3.5 Sonnet\", claude_result)]\n", + "display_bar_plot(\n", + " eval_results,\n", + " metrics=[\n", + " \"coherence\",\n", + " \"fluency\",\n", + " \"instruction_following\",\n", + " \"text_quality\",\n", + " \"rouge_l_sum\",\n", + " linguistic_acceptability,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5llc8RGSjIId" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_radar_plot(\n", + " eval_results,\n", + " metrics=[\n", + " \"coherence\",\n", + " \"fluency\",\n", + " \"instruction_following\",\n", + " \"text_quality\",\n", + " \"linguistic_acceptability\",\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "knLMyJJZTJqk" + }, + "source": [ + "### View Experiment log for evaluation runs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "O48Hn7brTJYa" + }, + "outputs": [ + { + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "summary": "{\n \"name\": \"pointwise_eval_task\",\n \"rows\": 2,\n \"fields\": [\n {\n \"column\": \"experiment_name\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 1,\n \"samples\": [\n \"gen-ai-eval-3p-experiment\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"run_name\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 2,\n \"samples\": [\n \"eval-llama-3-1-kq13qu77\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"run_type\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 1,\n \"samples\": [\n \"system.ExperimentRun\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"state\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 1,\n \"samples\": [\n \"COMPLETE\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"param.prompt_template\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 1,\n \"samples\": [\n \"# System_prompt\\n{system_prompt} # Question\\n{question}\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.rouge_l_sum/std\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.055117194889343274,\n 
\"min\": 0.07833343275693971,\n \"max\": 0.15628091728941002,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.07833343275693971\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.text_quality/std\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.14907119849998596,\n \"min\": 0.6324555320336759,\n \"max\": 0.8432740427115678,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.6324555320336759\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.instruction_following/mean\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 1.2020815280171309,\n \"min\": 1.9,\n \"max\": 3.6,\n \"num_unique_values\": 2,\n \"samples\": [\n 1.9\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.fluency/std\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.22798464466519278,\n \"min\": 0.5208550662132413,\n \"max\": 0.8432740427115678,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.5208550662132413\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.coherence/std\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.6484162815045169,\n \"min\": 0.49721446300587663,\n \"max\": 1.4142135623730951,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.49721446300587663\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.linguistic_acceptability/mean\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.9192388155425117,\n \"min\": 1.7,\n \"max\": 3.0,\n \"num_unique_values\": 2,\n \"samples\": [\n 1.7\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.fluency/mean\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 1.610082140761769,\n \"min\": 2.123,\n \"max\": 4.4,\n \"num_unique_values\": 2,\n \"samples\": [\n 2.123\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": 
\"metric.coherence/mean\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 1.8031222920256962,\n \"min\": 1.45,\n \"max\": 4.0,\n \"num_unique_values\": 2,\n \"samples\": [\n 1.45\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.instruction_following/std\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.4286068205728391,\n \"min\": 0.5676462121975466,\n \"max\": 1.1737877907772674,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.5676462121975466\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.rouge_l_sum/mean\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.13354934150332623,\n \"min\": 0.19088731799999997,\n \"max\": 0.379754608,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.19088731799999997\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.linguistic_acceptability/std\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.3392358788185142,\n \"min\": 0.674948557710553,\n \"max\": 1.1547005383792515,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.674948557710553\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.text_quality/mean\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 1.131370849898476,\n \"min\": 1.8,\n \"max\": 3.4,\n \"num_unique_values\": 2,\n \"samples\": [\n 1.8\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metric.row_count\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.0,\n \"min\": 10.0,\n \"max\": 10.0,\n \"num_unique_values\": 1,\n \"samples\": [\n 10.0\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}", + "type": "dataframe" + }, + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
experiment_namerun_namerun_typestateparam.prompt_templatemetric.rouge_l_sum/stdmetric.text_quality/stdmetric.instruction_following/meanmetric.fluency/stdmetric.coherence/stdmetric.linguistic_acceptability/meanmetric.fluency/meanmetric.coherence/meanmetric.instruction_following/stdmetric.rouge_l_sum/meanmetric.linguistic_acceptability/stdmetric.text_quality/meanmetric.row_count
0gen-ai-eval-3p-experimenteval-claude-3-80kv797zsystem.ExperimentRunCOMPLETE# System_prompt\\n{system_prompt} # Question\\n{question}0.1562810.8432743.60.8432741.4142143.04.4004.001.1737880.3797551.1547013.410.0
1gen-ai-eval-3p-experimenteval-llama-3-1-kq13qu77system.ExperimentRunCOMPLETE# System_prompt\\n{system_prompt} # Question\\n{question}0.0783330.6324561.90.5208550.4972141.72.1231.450.5676460.1908870.6749491.810.0
\n", + "
\n", + "
\n", + "\n", + "
\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + "\n", + "\n", + "
\n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
\n", + "\n", + "
\n", + "
\n" + ], + "text/plain": [ + " experiment_name run_name run_type \\\n", + "0 gen-ai-eval-3p-experiment eval-claude-3-80kv797z system.ExperimentRun \n", + "1 gen-ai-eval-3p-experiment eval-llama-3-1-kq13qu77 system.ExperimentRun \n", + "\n", + " state param.prompt_template \\\n", + "0 COMPLETE # System_prompt\\n{system_prompt} # Question\\n{question} \n", + "1 COMPLETE # System_prompt\\n{system_prompt} # Question\\n{question} \n", + "\n", + " metric.rouge_l_sum/std metric.text_quality/std \\\n", + "0 0.156281 0.843274 \n", + "1 0.078333 0.632456 \n", + "\n", + " metric.instruction_following/mean metric.fluency/std \\\n", + "0 3.6 0.843274 \n", + "1 1.9 0.520855 \n", + "\n", + " metric.coherence/std metric.linguistic_acceptability/mean \\\n", + "0 1.414214 3.0 \n", + "1 0.497214 1.7 \n", + "\n", + " metric.fluency/mean metric.coherence/mean \\\n", + "0 4.400 4.00 \n", + "1 2.123 1.45 \n", + "\n", + " metric.instruction_following/std metric.rouge_l_sum/mean \\\n", + "0 1.173788 0.379755 \n", + "1 0.567646 0.190887 \n", + "\n", + " metric.linguistic_acceptability/std metric.text_quality/mean \\\n", + "0 1.154701 3.4 \n", + "1 0.674949 1.8 \n", + "\n", + " metric.row_count \n", + "0 10.0 \n", + "1 10.0 " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "pointwise_eval_task.display_runs()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yLHuqe2D-x81" + }, + "source": [ + "## Prompt Engineering" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cd393d80e252" + }, + "source": [ + "The *Vertex AI Gen AI Evaluation Service SDK* simplifies prompt engineering by streamlining the process of creating and evaluating multiple prompt templates. It allows you to efficiently test different prompts against a chosen dataset and compare their performance using comprehensive evaluation metrics. 
This empowers you to identify the most effective prompts for your specific use case and optimize your generative AI applications." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "601693fa0461" + }, + "source": [ + "### Design a prompt with Prompt Template" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "a53e4c50779f" + }, + "outputs": [], + "source": [ + "system_instruction = \"You are a poetic assistant, skilled in explaining complex concepts with creative flair.\"\n", + "question = \"How does LLM work?\"\n", + "requirements = \"Explain concepts in great depth using simple terms, and give examples to help people learn. At the end of each explanation, you ask a question to check for understanding\"\n", + "\n", + "prompt_template = f\"{system_instruction} Answer this question: {question}, and follow the requirements: {requirements}.\"\n", + "\n", + "\n", + "model_response = (\n", + " GenerativeModel(\"gemini-1.5-pro\")\n", + " .generate_content(prompt_template)\n", + " .candidates[0]\n", + " .content.parts[0]\n", + " .text\n", + ")\n", + "\n", + "\n", + "display(HTML(f\"

Assembled Prompt:


{prompt_template}

\"))\n", + "display(HTML(\"

Model Response:


\"))\n", + "Markdown(model_response)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5261f69b615a" + }, + "source": [ + "### Compare and optimize prompt template design" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7d01f70c3163" + }, + "source": [ + "#### Define an evaluation dataset\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2157353f3309" + }, + "source": [ + "To perform pointwise inference, the evaluation dataset is required to contain the following fields:\n", + "\n", + "* Instruction: Part of the input user prompt. It refers to the inference instruction that is sent to your LLM.\n", + "* Context: User input for the Gen AI model or application in the current turn.\n", + "* Reference: The ground truth to compare your LLM response to.\n", + "\n", + "Your dataset must include a minimum of one evaluation example. We recommend around 100 examples to ensure high-quality aggregated metrics and statistically significant results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "972ebabb2712" + }, + "outputs": [], + "source": [ + "instruction = \"Summarize the following article: \\n\"\n", + "\n", + "context = [\n", + " \"Typhoon Phanfone has killed at least one person, a US airman on Okinawa who was washed away by high waves. Thousands of households have lost power and Japan's two largest airlines have suspended many flights. The storm also forced the suspension of the search for people missing after last week's volcanic eruption. The storm-tracking website Tropical Storm Risk forecasts that Phanfone will rapidly lose power over the next few hours as it goes further into the Pacific Ocean. Typhoon Phanfone was downgraded from an earlier status of a super typhoon, but the Japan Meteorological Agency had warned it was still a dangerous storm. Japan averages 11 typhoons a year, according to its weather agency. 
The typhoon made landfall on Monday morning near the central city of Hamamatsu, with winds of up to 180 km/h (112 mph). The airman was one of three US military personnel swept away by high waves whipped up by the typhoon off southern Okinawa island, where the US has a large military base. The remaining two are still missing. A police spokesman said they had been taking photographs of the sea. A university student who was surfing off the seas of Kanagawa Prefecture, south of Tokyo, was also missing, national broadcast NHK reports. It said at least 10 people had been injured and 9,500 houses were without power. The storm was expected to deposit about 100mm of rain on Tokyo over 24 hours, according to the Transport Ministry website. Many schools were closed on Monday and two car companies in Japan halted production at some plants ahead of the storm. More than 174 domestic flights were affected nationwide, NHK state broadcaster said on Sunday. On Sunday, heavy rain delayed the Japanese Formula One Grand Prix in Suzaka. French driver Jules Bianchi lost control in the wet conditions and crashed, sustaining a severe head injury.\",\n", + " \"The blaze started at the detached building in Drivers End in Codicote, near Welwyn, during the morning. There was another fire at the building 20 years ago, after which fire-proof foil was placed under the thatch, which is protecting the main building. More than 15 fire engines and support vehicles were called to tackle the blaze. Roads in the area were closed and traffic diverted.\",\n", + " 'The 18-year-old fell at the New Charter Academy on Broadoak Road in Ashton-under-Lyne at about 09:10 BST, Greater Manchester Police (GMP) said. GMP said he had gone to Manchester Royal Infirmary and his condition was \"serious\". Principal Jenny Langley said the school would remain \"fully open\" while police investigated. 
\"Our thoughts are with the family and we\\'re doing everything we can to support them along with staff and pupils,\" she said.',\n", + " 'But Belgian-born Dutchman Max Verstappen was unable to drive a car legally on his own in either country. That all changed on Wednesday when the youngster turned 18 and passed his driving test at the first attempt. Despite having competed in 14 grands prix since his debut in Australia in March, Verstappen admitted to feeling the pressure during his test. \"It\\'s a relief,\" said the Toro Rosso driver, who finished ninth in Japan on Sunday and had only started driving lessons a week ago. \"I was a bit nervous to make mistakes, but the exam went well.\" A bonus of turning 18 is that Verstappen will now be able to drink the champagne if he ever makes it onto the podium.',\n", + "]\n", + "\n", + "reference = [\n", + " \"A powerful typhoon has brought many parts of Japan to a standstill and briefly battered Tokyo before heading out to sea.\",\n", + " \"A major fire has been burning in the thatched roof of a large property in Hertfordshire.\",\n", + " \"A student has been taken to hospital after falling from a balcony at a Greater Manchester school.\",\n", + " \"He is Formula 1's youngest ever driver and in charge of a car that can reach over 200mph.\",\n", + "]\n", + "\n", + "response = [\n", + " \"Typhoon Phanfone, while downgraded from super typhoon status, caused significant disruption and tragedy in Japan. One US airman died after being swept away by high waves, with two more missing. The storm caused power outages for thousands, flight cancellations, and the suspension of rescue efforts for missing volcano victims. Heavy rain and strong winds led to school and factory closures, transportation disruptions, and at least 10 injuries. The typhoon is expected to weaken as it moves over the Pacific Ocean.\",\n", + " \"A large fire broke out in a detached thatched building in Codicote, near Welwyn. 
This is the second fire at the building in 20 years. Thankfully, fire-proof foil installed after the previous fire is protecting the main building. Over 15 fire engines and support vehicles responded, closing roads and diverting traffic in the area.\",\n", + " \"An 18-year-old student at New Charter Academy in Ashton-under-Lyne suffered a serious fall and was hospitalized. The incident is under investigation by Greater Manchester Police, but the school remains open. The principal expressed support for the student's family and the school community.\",\n", + " \"Max Verstappen, a Formula One driver, was finally able to get his driver's license at age 18. Despite already competing in 14 Grand Prix races, he was not of legal driving age in his native countries. He admitted to being nervous but passed the test on his first attempt. As an added bonus of turning 18, Verstappen can now enjoy champagne on the podium if he places.\",\n", + "]\n", + "\n", + "eval_dataset = pd.DataFrame(\n", + " {\n", + " \"instruction\": instruction,\n", + " \"context\": context,\n", + " \"reference\": reference,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "baff9d1cca96" + }, + "source": [ + "#### Define prompt templates to compare\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f639a79316c1" + }, + "outputs": [], + "source": [ + "prompt_templates = [\n", + " \"Instruction: {instruction} such that you'r explaining it to a 5 year old. Article: {context}. Summary:\",\n", + " \"Article: {context}. Complete this task: {instruction}. Summary:\",\n", + " \"Goal: {instruction} and give me a TLDR in five words. Here's an article: {context}. Summary:\",\n", + " \"Article: {context}. Reference Summary: {reference}. 
{instruction} to be more concise and verbose than the reference.\",\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ncJ-4uA_nxNB" + }, + "source": [ + "#### Define a model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3155f46d31c6" + }, + "outputs": [], + "source": [ + "generation_config = {\"temperature\": 0.3, \"max_output_tokens\": 256, \"top_k\": 1}\n", + "\n", + "gemini_model = GenerativeModel(\n", + " \"gemini-1.5-pro\",\n", + " generation_config=generation_config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c3c1121684f5" + }, + "source": [ + "#### Define an EvalTask" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "20eb95c14422" + }, + "outputs": [], + "source": [ + "metrics = [\n", + " \"rouge_l_sum\",\n", + " \"bleu\",\n", + " \"fluency\",\n", + " \"coherence\",\n", + " \"safety\",\n", + " \"groundedness\",\n", + " \"summarization_quality\",\n", + " \"verbosity\",\n", + " \"instruction_following\",\n", + " \"text_quality\",\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "052ec86e5777" + }, + "outputs": [], + "source": [ + "experiment_name = \"eval-sdk-prompt-engineering\" # @param {type:\"string\"}\n", + "\n", + "summarization_eval_task = EvalTask(\n", + " dataset=eval_dataset,\n", + " metrics=metrics,\n", + " experiment=experiment_name,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "515fe8c3652f" + }, + "source": [ + "#### Run Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "2b8422213915" + }, + "outputs": [], + "source": [ + "eval_results = []\n", + "for i, prompt_template in enumerate(prompt_templates):\n", + " eval_result = summarization_eval_task.evaluate(\n", + " prompt_template=prompt_template,\n", + " model=model,\n", + " # Customize eval service rate limit based on your project's 
Gemini-1.5-pro model quota to improve speed.\n", + " # See more details in https://cloud.google.com/vertex-ai/generative-ai/docs/models/run-evaluation#increase-quota\n", + " evaluation_service_qps=1,\n", + " )\n", + "\n", + " eval_results.append((f\"Prompt Template #{i+1}\", eval_result))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ec6e97cf9f27" + }, + "source": [ + "#### Display Evaluation report and explanations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d3724b416b39" + }, + "outputs": [], + "source": [ + "for result in eval_results:\n", + " display_eval_result(title=result[0], eval_result=result[1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0615d0925469" + }, + "outputs": [], + "source": [ + "for eval_result in eval_results:\n", + " display_explanations(eval_result[1], metrics=[\"summarization_quality\"], n=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GzWSUPj2oV-_" + }, + "source": [ + "#### Visualize Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SSDyxE2aoYMH" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_radar_plot(\n", + " eval_results,\n", + " metrics=[\"instruction_following\", \"fluency\", \"coherence\", \"text_quality\"],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Id5sjYHboZHh" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_bar_plot(\n", + " eval_results,\n", + " metrics=[\"instruction_following\", \"fluency\", \"coherence\", \"text_quality\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ed966819648e" + }, + "source": [ + "#### View Experiment log for evaluation runs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "363c1b2553b9" + }, + "outputs": [], + "source": [ + "summarization_eval_task.display_runs()" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [ + "tfQ7sPtOjZOw", + "F_Gw6YLeOvFq" + ], + "name": "evaluate_models_in_vertex_ai_studio_and_model_garden.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb b/gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb index ff16f58121..4e9237b399 100644 --- a/gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb +++ b/gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb @@ -86,7 +86,7 @@ "## Overview\n", "\n", "\n", - "In this tutorial, you will learn how to use the use the *Vertex AI Python SDK for Gen AI Evaluation* to evaluate **Retrieval-Augmented Generation** (RAG) generated answers for **Question Answering** (QA) task.\n", + "In this tutorial, you will learn how to use the use the *Vertex AI Python SDK for Gen AI Evaluation Service* to evaluate **Retrieval-Augmented Generation** (RAG) generated answers for **Question Answering** (QA) task.\n", "\n", "RAG is a technique to improve groundness, relevancy and factuality of large language models (LLMs) by finding relevant information from the model's knowledge base. 
RAG is done by converting a query into a vector representation (embeddings), and then finding the most similar vectors in the knowledge base. The most similar vectors are then used to help generate the response.\n", "\n", @@ -98,7 +98,9 @@ "\n", "The examples used in this notebook is from Stanford Question Answering Dataset [SQuAD 2.0](https://web.stanford.edu/class/archive/cs/cs224n/cs224n.1194/reports/default/15785042.pdf).\n", "\n", - "Learn more about [Vertex AI Rapid Evaluation SDK](https://cloud.google.com/vertex-ai/generative-ai/docs/models/online-pipeline-services).\n" + "See also: \n", + "\n", + "- Learn more about [Vertex Gen AI Evaluation Service SDK](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview)." ] }, { diff --git a/gemini/evaluation/intro_to_gen_ai_evaluation_service_sdk.ipynb b/gemini/evaluation/intro_to_gen_ai_evaluation_service_sdk.ipynb index 50e5159798..9dea5198c5 100644 --- a/gemini/evaluation/intro_to_gen_ai_evaluation_service_sdk.ipynb +++ b/gemini/evaluation/intro_to_gen_ai_evaluation_service_sdk.ipynb @@ -29,7 +29,7 @@ "id": "5e_7VOHBer8D" }, "source": [ - " # Getting Started with Vertex AI Python SDK for Gen AI Evaluation Service\n", + " # Getting Started with Vertex AI Python SDK for Gen AI Evaluation Service \n", "\n", "\n", "
\n", diff --git a/gemini/evaluation/migration_guide_preview_to_GA_sdk.ipynb b/gemini/evaluation/migration_guide_preview_to_GA_sdk.ipynb index 231164f7a0..641c4023e1 100644 --- a/gemini/evaluation/migration_guide_preview_to_GA_sdk.ipynb +++ b/gemini/evaluation/migration_guide_preview_to_GA_sdk.ipynb @@ -29,7 +29,7 @@ "id": "5hCmIhlJjZOt" }, "source": [ - "# Gen AI Evaluation Service SDK Preview-to-GA Migration Guide\n", + "# Gen AI Evaluation Service SDK Preview-to-GA Migration Guide | Gen AI Evaluation SDK Tutorial\n", "\n", "\n", "\n", @@ -106,7 +106,8 @@ "* How to handle discontinued metrics\n", "* How to handle the new input schema\n", "\n", - "To learn more about the GA release details, please refer to the latest documentation and notebook tutorials in [Generative AI on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview).\n", + "\n", + "To learn more about the GA release details, please refer to the latest documentation and notebook tutorials in [Vertex Gen AI Evaluation Service](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview).\n", "\n", "The examples used in this notebook is from Stanford Question Answering Dataset [SQuAD 2.0](https://web.stanford.edu/class/archive/cs/cs224n/cs224n.1194/reports/default/15785042.pdf).\n" ] diff --git a/gemini/evaluation/prompt_engineering_gen_ai_evaluation_service_sdk.ipynb b/gemini/evaluation/prompt_engineering_gen_ai_evaluation_service_sdk.ipynb index 51975dd22f..0d8a5eb969 100644 --- a/gemini/evaluation/prompt_engineering_gen_ai_evaluation_service_sdk.ipynb +++ b/gemini/evaluation/prompt_engineering_gen_ai_evaluation_service_sdk.ipynb @@ -83,7 +83,11 @@ "\n", "* Evaluate and refine the prompt templates systematically for the `EvalTask`.\n", "\n", - "* Compare and choose the prompt template with the best evaluation performance." 
+ "* Compare and choose the prompt template with the best evaluation performance.\n", + "\n", + "See also: \n", + "\n", + "- Learn more about [Vertex Gen AI Evaluation Service SDK](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview)." ] }, { From f5b615c12f8d01b32c605b8280ff5a0469691e60 Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Tue, 1 Oct 2024 16:38:20 +0800 Subject: [PATCH 31/76] updating token count notebook (#1200) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) Fixes # 🦕 --- ...ning_token_count_and_cost_estimation.ipynb | 1076 ++++++++++++----- 1 file changed, 801 insertions(+), 275 deletions(-) diff --git a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb index 1af8cb4092..79cd82d819 100644 --- a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb +++ b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb @@ -29,7 +29,7 @@ "id": "JAPoU8Sm5E6e" }, "source": [ - "# Vertex AI Supervised Tuning token count and cost estimation.\n", + "# Vertex AI Supervised tuning token count and cost estimation.\n", "\n", "
\n", " - + - +
\n", @@ -74,10 +74,9 @@ "source": [ "## Overview\n", "\n", - "This notebook serves as a tool to preprocess and estimate token counts for tuning costs for tuning [`gemini-1.0-pro-002`](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning).\n", - "\n", - "You can also find the code to check your dataset for Vertex AI Gemini `gemini-1.5-pro-001`.\n", + "This notebook serves as a tool to preprocess and estimate token counts for tuning costs for tuning [`gemini-1.5-pro-002`](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning).\n", "\n", + "At the end you will also find the code to preprocess and estimate token counts for tuning costs for tuning `gemini-1.0-pro-002`. If you get started please start with `gemini-1.5-pro-002`.\n", "\n", "For how to prepare dataset for tuning gemini, please refer to this [tutorial](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-about)." ] @@ -102,22 +101,11 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 24, "metadata": { "id": "tFy3H3aPgx12" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/5.3 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m5.2/5.3 MB\u001b[0m \u001b[31m161.8 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.3/5.3 MB\u001b[0m \u001b[31m80.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h\u001b[33m WARNING: The script tb-gcp-uploader is installed in '/root/.local/bin' which is not on PATH.\n", - " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\u001b[33m\n", - "\u001b[0m" - ] - } - ], + 
"outputs": [], "source": [ "%pip install --upgrade --user --quiet google-cloud-aiplatform[tokenization] numpy==1.26.4 tensorflow" ] @@ -137,20 +125,24 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 25, "metadata": { - "id": "XRvKdaPDTznN" + "id": "XRvKdaPDTznN", + "outputId": "30089739-607e-433e-8592-6e5b44e914c4", + "colab": { + "base_uri": "https://localhost:8080/" + } }, "outputs": [ { + "output_type": "execute_result", "data": { "text/plain": [ "{'status': 'ok', 'restart': True}" ] }, - "execution_count": 1, "metadata": {}, - "output_type": "execute_result" + "execution_count": 25 } ], "source": [ @@ -162,107 +154,810 @@ }, { "cell_type": "markdown", - "metadata": { - "id": "SbmM4z7FOBpM" - }, + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", + "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", + "\n", + "\n", + "import vertexai\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EdvJRUWRNGHE" + }, + "source": [ + "## Tuning token count and cost estimation: `Gemini 1.5 pro` and `Gemini 1.5 Flash`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cPhY560YQijW" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 115, + "metadata": { + "id": "4498u5KpQijW" + }, + "outputs": [], + "source": [ + "from collections import defaultdict\n", + "import dataclasses\n", + "import json\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "from vertexai.generative_models import Content, Part\n", + "from vertexai.preview.tokenization import get_tokenizer_for_model\n", + "from google.cloud import storage" + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Load the dataset\n", + "\n", + "This example is for text only. Define the Google Cloud Storage URIs pointing to your training and validation datasets or continue using the URIs provided." 
+ ], + "metadata": { + "id": "wvqIIG1M0YCy" + } + }, + { + "cell_type": "code", + "source": [ + "BASE_MODEL = \"gemini-1.5-pro-002\" # @param ['gemini-1.5-pro-002']{type:\"string\"}\n", + "training_dataset_uri = \"gs://github-repo/generative-ai/gemini/tuning/train_sft_train_samples.jsonl\" # @param {type:\"string\"}\n", + "validation_dataset_uri = \"gs://github-repo/generative-ai/gemini/tuning/val_sft_val_samples.jsonl\" # @param {type:\"string\"}\n", + "\n", + "tokenizer = get_tokenizer_for_model(\"gemini-1.5-pro-001\")" + ], + "metadata": { + "id": "oue9Q0GG0Rvk" + }, + "execution_count": 135, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "We'll now load the dataset and conduct some basic statistical analysis to understand its structure and content.\n" + ], + "metadata": { + "id": "dbl6UD5P3LIH" + } + }, + { + "cell_type": "code", + "source": [ + "example_training_dataset = []\n", + "example_validation_dataset = []\n", + "\n", + "try:\n", + " with tf.io.gfile.GFile(training_dataset_uri) as dataset_jsonl_file:\n", + " example_training_dataset = [\n", + " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", + " ]\n", + "except KeyError as e:\n", + " print(f\"KeyError: Please check if your file '{training_dataset_uri}' is a JSONL file with correct JSON format. Error: {e}\")\n", + " # Exit the script if there's an error in the training data\n", + " import sys\n", + " sys.exit(1)\n", + "\n", + "print()\n", + "\n", + "if validation_dataset_uri:\n", + " try:\n", + " with tf.io.gfile.GFile(validation_dataset_uri) as dataset_jsonl_file:\n", + " example_validation_dataset = [\n", + " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", + " ]\n", + " except KeyError as e:\n", + " print(f\"KeyError: Please check if your file '{validation_dataset_uri}' is a JSONL file with correct JSON format. 
Error: {e}\")\n", + " # Exit the script if there's an error in the validation data\n", + " import sys\n", + " sys.exit(1)\n", + "\n", + "# Initial dataset stats\n", + "print(\"Num training examples:\", len(example_training_dataset))\n", + "if example_training_dataset: # Check if the list is not empty\n", + " print(\"First example:\")\n", + " for item in example_training_dataset[0][\"contents\"]:\n", + " print(item)\n", + " text_content = item.get(\"parts\", [{}])[0].get(\"text\", \"\")\n", + " print(tokenizer.count_tokens(text_content)) # Make sure 'tokenizer' is defined\n", + "\n", + "if example_validation_dataset:\n", + " print(\"Num validation examples:\", len(example_validation_dataset))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "M9TrzApr1tYQ", + "outputId": "2e19860a-0bf6-446c-8bcc-e262c3c3833c" + }, + "execution_count": 136, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "Num training examples: 500\n", + "First example:\n", + "{'role': 'user', 'parts': [{'text': 'Honesty is usually the best policy. It is disrespectful to lie to someone. If you don\\'t want to date someone, you should say so. Sometimes it is easy to be honest. For example, you might be able to truthfully say, \"No, thank you, I already have a date for that party.\" Other times, you might need to find a kinder way to be nice. Maybe you are not attracted to the person. Instead of bluntly saying that, try saying, \"No, thank you, I just don\\'t think we would be a good fit.\" Avoid making up a phony excuse. For instance, don\\'t tell someone you will be out of town this weekend if you won\\'t be. There\\'s a chance that you might then run into them at the movies, which would definitely cause hurt feelings. A compliment sandwich is a really effective way to provide feedback. Essentially, you \"sandwich\" your negative comment between two positive things. Try using this method when you need to reject someone. 
An example of a compliment sandwich is to say something such as, \"You\\'re an awesome person. Unfortunately, I\\'m not interested in dating you. Someone else is going to be really lucky to date someone with such a great personality!\" You could also try, \"You are a really nice person. I\\'m only interested you as a friend. I like when we hang out in big groups together!\" Be sincere. If you offer false compliments, the other person will likely be able to tell and feel hurt. If you do not want to date someone, it is best to be upfront about your feelings. Do not beat around the bush. If your mind is made up, it is best to clearly state your response. If someone asks you to date them and you don\\'t want to, you can be direct and kind at the same time. State your answer clearly. You can make your feelings clear without purposefully hurting someone else\\'s feelings. Try smiling and saying, \"That sounds fun, but no thank you. I\\'m not interested in dating you.\" Don\\'t beat around the bush. If you do not want to accept the date, there is no need to say, \"Let me think about it.\" It is best to get the rejection over with. You don\\'t want to give someone false hope. Avoid saying something like, \"Let me check my schedule and get back to you.\" Try to treat the person the way you would want to be treated. This means that you should choose your words carefully. Be thoughtful in your response. It\\'s okay to pause before responding. You might be taken by surprise and need a moment to collect your thoughts. Say thank you. It is a compliment to be asked out. You can say, \"I\\'m flattered. Unfortunately, I can\\'t accept.\" Don\\'t laugh. Many people laugh nervously in awkward situations. Try to avoid giggling, as that is likely to result in hurt feelings. Sometimes it is not what you say, but how you say it. If you need to reject someone, think about factors other than your words. Non-verbal communication matters, too. Use the right tone of voice. 
Try to sound gentle but firm. Make eye contact. This helps convey that you are being serious, and also shows respect for the other person. If you are in public, try not to speak too loudly. It is not necessary for everyone around you to know that you are turning down a date.\\n\\nProvide a summary of the article in two or three sentences:\\n\\n'}]}\n", + "CountTokensResult(total_tokens=730)\n", + "{'role': 'model', 'parts': [{'text': 'Tell the truth. Use a \"compliment sandwich\". Be direct. Treat the person with respect. Communicate effectively.'}]}\n", + "CountTokensResult(total_tokens=23)\n", + "Num validation examples: 100\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "You can perform various error checks to validate that each tuning example in the dataset adheres to the format expected by the tuning API. Errors are categorized based on their nature for easier debugging. \n", + " \n", + "For how to prepare dataset for tuning gemini, please refer to this [tutorial](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-about).\n", + "\n", + "1. **Presence of System Instruction**: Checks if there is a system instruction and if its there for all rows. System instruction is optional. Warning type: `systemInstruction is missing in some rows`.\n", + "2. **Presence of Contents List:** Checks if a `contents` list is present in each entry. Error type: `missing_contents_list`.\n", + "3. **Content Item Format:** Validates that each item in the `contents` list is a dictionary. Error type: `invalid_content_item`.\n", + "4. **Content Item Format:** Validates that each item in the `contents` list is a dictionary. Error type: `invalid_content_item`.\n", + "5. **Role Validation:** Checks if the role is one of `user`, or `model` for `contents` list and system for `systemInstruction` list. Error type: `unrecognized_role`.\n", + "6. **Parts List Validation:** Verifies that the `parts` key contains a list. 
Error type: `missing_or_invalid_parts`.\n", + "7. **Part Format:** Checks if each part in the `parts` list is a dictionary and contains the key `text`. Error type: `invalid_part`.\n", + "8. **Text Validation:** Ensures that the `text` key has textual data and is a string. Error type: `missing_text`.\n", + "9. **Consecutive Turns:** For the chat history, it is enforced that the message roles alternate (user, then model, then user, etc.). Error type: `consecutive_turns`. This check is not applicable for systemInstruction.\n", + "\n", + "\n", + "\n" + ], + "metadata": { + "id": "L5RhrH6r4NrC" + } + }, + { + "cell_type": "code", + "source": [ + "from collections import defaultdict\n", + "\n", + "def validate_dataset_format(dataset):\n", + " \"\"\"Validates the dataset.\n", + "\n", + " Args:\n", + " dataset_uri: The dataset uri to be validated.\n", + " \"\"\"\n", + " format_errors = defaultdict(list)\n", + " system_instruction_missing = False # Flag to track missing systemInstruction\n", + "\n", + " if not dataset or len(dataset) == 0:\n", + " print(\"Input dataset file is empty or inaccessible.\")\n", + " return\n", + "\n", + " for row_idx, example in enumerate(dataset):\n", + " # Verify presence of contents list\n", + " if not isinstance(example, dict):\n", + " format_errors[\"invalid_input\"].append(row_idx)\n", + " continue\n", + "\n", + " # Check for systemInstruction and validate if present\n", + " system_instruction = example.get(\"systemInstruction\", None)\n", + " if system_instruction:\n", + " try:\n", + " # Validate the list within \"parts\"\n", + " validate_contents(system_instruction.get(\"parts\", []), format_errors, row_idx, is_system_instruction=True)\n", + " except (TypeError, AttributeError, KeyError) as e:\n", + " print(\"Invalid input during system instruction validation: %s\", e)\n", + " format_errors[\"invalid_system_instruction\"].append(row_idx)\n", + " else:\n", + " system_instruction_missing = True # Set the flag if missing\n", + "\n", + " 
contents = example.get(\"contents\", None)\n", + " if not contents:\n", + " format_errors[\"missing_contents_list\"].append(row_idx)\n", + " continue\n", + " try:\n", + " validate_contents(contents, format_errors, row_idx)\n", + " except (TypeError, AttributeError, KeyError) as e:\n", + " print(\"Invalid input during contents validation: %s\", e)\n", + " format_errors[\"invalid_input\"].append(row_idx)\n", + "\n", + " if format_errors:\n", + " print(\"Found errors for this dataset:\")\n", + " for k, v in format_errors.items():\n", + " print(f\"{k}: {v}\")\n", + " else:\n", + " print(\"No errors found for this dataset.\")\n", + "\n", + " # Print warning only once after processing all rows\n", + " if system_instruction_missing:\n", + " print(\"Warning: systemInstruction is missing in some rows.\")\n", + "\n", + "\n", + "def validate_contents(contents, format_errors, row_index, is_system_instruction=False):\n", + " \"\"\"Validates contents list format.\"\"\"\n", + "\n", + " if not isinstance(contents, list):\n", + " format_errors[\"invalid_contents_list\"].append(row_index)\n", + " return\n", + "\n", + " prev_role = None\n", + " for content_item in contents: # Iterate over content items in the \"contents\" list\n", + " if not isinstance(content_item, dict):\n", + " format_errors[\"invalid_content_item\"].append(row_index)\n", + " return\n", + "\n", + " # Skip key checks for system instructions\n", + " if not is_system_instruction and (\"role\" not in content_item or \"parts\" not in content_item):\n", + " format_errors[\"content_item_missing_key\"].append(row_index)\n", + " return\n", + "\n", + " # ... 
(rest of the validation logic remains the same)" + ], + "metadata": { + "id": "S2FVNbIX0R0n" + }, + "execution_count": 137, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "validate_dataset_format(example_training_dataset)\n", + "if example_validation_dataset:\n", + " validate_dataset_format(example_validation_dataset)" + ], + "metadata": { + "id": "nSLnbeJ00R2v", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "8f680322-45cc-4047-e76d-f7c3932d0439" + }, + "execution_count": 138, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "No errors found for this dataset.\n", + "Warning: systemInstruction is missing in some rows.\n", + "No errors found for this dataset.\n", + "Warning: systemInstruction is missing in some rows.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Utils for dataset analysis and token counting\n", + "\n", + "This section focuses on analyzing the structure and token counts of your datasets. You will also define some utility functions to streamline subsequent steps in the notebook.\n", + "\n", + "* Load and inspect sample data from the training and validation datasets.\n", + "* Calculate token counts for messages to understand the dataset's characteristics.\n", + "* Define utility functions for calculating token distributions and dataset statistics. These will help assess the suitability of your data for supervised tuning and estimate potential costs." + ], + "metadata": { + "id": "SJFwhPid_7oG" + } + }, + { + "cell_type": "code", + "source": [ + "@dataclasses.dataclass\n", + "class DatasetDistribution:\n", + " \"\"\"Dataset disbribution for given a population of values.\n", + "\n", + " It optionally contains a histogram consists of bucketized data representing\n", + " the distribution of those values. 
The summary statistics are the sum, min,\n", + " max, mean, median, p5, p95.\n", + "\n", + " Attributes:\n", + " sum: Sum of the values in the population.\n", + " max: Max of the values in the population.\n", + " min: Min of the values in the population.\n", + " mean: The arithmetic mean of the values in the population.\n", + " median: The median of the values in the population.\n", + " p5: P5 quantile of the values in the population.\n", + " p95: P95 quantile of the values in the population.\n", + " \"\"\"\n", + "\n", + " sum: int | None = None\n", + " max: float | None = None\n", + " min: float | None = None\n", + " mean: float | None = None\n", + " median: float | None = None\n", + " p5: float | None = None\n", + " p95: float | None = None\n", + "\n", + "\n", + "@dataclasses.dataclass\n", + "class DatasetStatistics:\n", + " \"\"\"Dataset statistics used for dataset profiling.\n", + "\n", + " Attributes:\n", + " total_number_of_dataset_examples: Number of tuning examples in the dataset.\n", + " total_number_of_records_for_training: Number of tuning records after\n", + " formatting. 
Each model turn in the chat message will be considered as a record for tuning.\n", + " total_number_of_billable_tokens: Number of total billable tokens in the\n", + " dataset.\n", + " user_input_token_length_stats: Stats for input token length.\n", + " user_output_token_length_stats: Stats for output token length.\n", + " \"\"\"\n", + "\n", + " total_number_of_dataset_examples: int | None = None\n", + " total_number_of_records_for_training: int | None = None\n", + " total_number_of_billable_tokens: int | None = None\n", + " user_input_token_length_stats: DatasetDistribution | None = None\n", + " user_output_token_length_stats: DatasetDistribution | None = None\n", + "\n", + "\n", + "MAX_TOKENS_PER_EXAMPLE = 32 * 1024\n", + "ESTIMATE_PADDING_TOKEN_PER_EXAMPLE = 8" + ], + "metadata": { + "id": "al_uUWOP4Ss2" + }, + "execution_count": 140, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def calculate_distribution_for_population(population) -> DatasetDistribution:\n", + " \"\"\"Calculates the distribution from the population of values.\n", + "\n", + " Args:\n", + " population: The population of values to calculate distribution for.\n", + "\n", + " Returns:\n", + " DatasetDistribution of the given population of values.\n", + " \"\"\"\n", + " if not population:\n", + " raise ValueError(\"population is empty\")\n", + "\n", + " return DatasetDistribution(\n", + " sum=np.sum(population),\n", + " max=np.max(population),\n", + " min=np.min(population),\n", + " mean=np.mean(population),\n", + " median=np.median(population),\n", + " p5=np.percentile(population, 5, method=\"nearest\"),\n", + " p95=np.percentile(population, 95, method=\"nearest\"),\n", + " )\n", + "\n", + "\n", + "def get_token_distribution_for_one_tuning_dataset_example(example):\n", + " model_turn_token_list = []\n", + " input_token_list = []\n", + " input = []\n", + " n_too_long = 0\n", + " number_of_records_for_training = 0 # each model turn in the chat message will be considered as a record 
for tuning\n", + "\n", + " # Handle optional systemInstruction\n", + " system_instruction = example.get(\"systemInstruction\")\n", + " if system_instruction:\n", + " text = system_instruction.get(\"parts\")[0].get(\"text\") # Assuming single part in system instruction\n", + " input.append(Content(role=\"system\", parts=[Part.from_text(text)]))\n", + "\n", + " for content_item in example[\"contents\"]:\n", + " role = content_item.get(\"role\").lower()\n", + " text = content_item.get(\"parts\")[0].get(\"text\") # Assuming single part in content item\n", + "\n", + " if role.lower() == \"model\":\n", + " result = tokenizer.count_tokens(input)\n", + " input_token_list.append(result.total_tokens)\n", + " model_turn_token_list.append(tokenizer.count_tokens(text).total_tokens)\n", + " number_of_records_for_training += 1\n", + " if (\n", + " result.total_tokens + tokenizer.count_tokens(text).total_tokens\n", + " > MAX_TOKENS_PER_EXAMPLE\n", + " ):\n", + " n_too_long += 1\n", + " break\n", + "\n", + " input.append(Content(role=role, parts=[Part.from_text(text)]))\n", + "\n", + " return (\n", + " input_token_list,\n", + " model_turn_token_list,\n", + " number_of_records_for_training,\n", + " np.sum(model_turn_token_list) + np.sum(input_token_list),\n", + " n_too_long,\n", + " )\n", + "\n", + "\n", + "def get_dataset_stats_for_dataset(dataset):\n", + " results = map(get_token_distribution_for_one_tuning_dataset_example, dataset)\n", + " user_input_token_list = []\n", + " model_turn_token_list = []\n", + " number_of_records_for_training = 0\n", + " total_number_of_billable_tokens = 0\n", + " n_too_long_for_dataset = 0\n", + " for (\n", + " input_token_list_per_example,\n", + " model_turn_token_list_per_example,\n", + " number_of_records_for_training_per_example,\n", + " number_of_billable_token_per_example,\n", + " n_too_long,\n", + " ) in results:\n", + " user_input_token_list.extend(input_token_list_per_example)\n", + " 
model_turn_token_list.extend(model_turn_token_list_per_example)\n", + " number_of_records_for_training += number_of_records_for_training_per_example\n", + " total_number_of_billable_tokens += number_of_billable_token_per_example\n", + " n_too_long_for_dataset += n_too_long\n", + "\n", + " print(\n", + " f\"\\n{n_too_long_for_dataset} examples may be over the {MAX_TOKENS_PER_EXAMPLE} token limit, they will be truncated during tuning.\"\n", + " )\n", + "\n", + " return DatasetStatistics(\n", + " total_number_of_dataset_examples=len(dataset),\n", + " total_number_of_records_for_training=number_of_records_for_training,\n", + " total_number_of_billable_tokens=total_number_of_billable_tokens\n", + " + number_of_records_for_training * ESTIMATE_PADDING_TOKEN_PER_EXAMPLE,\n", + " user_input_token_length_stats=calculate_distribution_for_population(\n", + " user_input_token_list\n", + " ),\n", + " user_output_token_length_stats=calculate_distribution_for_population(\n", + " model_turn_token_list\n", + " ),\n", + " )\n", + "\n", + "def print_dataset_stats(dataset):\n", + " dataset_stats = get_dataset_stats_for_dataset(dataset)\n", + " print(\"Below you can find the dataset statistics:\")\n", + " print(\n", + " f\"Total number of examples in the dataset: {dataset_stats.total_number_of_dataset_examples}\"\n", + " )\n", + " print(\n", + " f\"Total number of records for training: {dataset_stats.total_number_of_records_for_training}\"\n", + " )\n", + " print(\n", + " f\"Total number of billable tokens in the dataset: {dataset_stats.total_number_of_billable_tokens}\"\n", + " )\n", + " print(\n", + " f\"User input token length distribution: {dataset_stats.user_input_token_length_stats}\"\n", + " )\n", + " print(\n", + " f\"User output token length distribution: {dataset_stats.user_output_token_length_stats}\"\n", + " )\n", + " return dataset_stats" + ], + "metadata": { + "id": "21xgvR3g0R5P" + }, + "execution_count": 141, + "outputs": [] + }, + { + "cell_type": "markdown", + 
"source": [ + "Next you can analyze the structure and token counts of your datasets." + ], + "metadata": { + "id": "-FF4ReY6Atw3" + } + }, + { + "cell_type": "code", + "source": [ + "training_dataset_stats = print_dataset_stats(example_training_dataset)\n", + "\n", + "if example_validation_dataset:\n", + " validation_dataset_stats = print_dataset_stats(example_validation_dataset)" + ], + "metadata": { + "id": "sZqsWno60R7O", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "8b78aea8-fbfa-4f48-9ab0-03bdca05f9f9" + }, + "execution_count": 142, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "0 examples may be over the 32768 token limit, they will be truncated during tuning.\n", + "Below you can find the dataset statistics:\n", + "Total number of examples in the dataset: 500\n", + "Total number of records for training: 500\n", + "Total number of billable tokens in the dataset: 259243\n", + "User input token length distribution: DatasetDistribution(sum=233592, max=2932, min=25, mean=467.184, median=414.5, p5=101, p95=1002)\n", + "User output token length distribution: DatasetDistribution(sum=21651, max=237, min=3, mean=43.302, median=37.0, p5=15, p95=89)\n", + "\n", + "0 examples may be over the 32768 token limit, they will be truncated during tuning.\n", + "Below you can find the dataset statistics:\n", + "Total number of examples in the dataset: 100\n", + "Total number of records for training: 100\n", + "Total number of billable tokens in the dataset: 50154\n", + "User input token length distribution: DatasetDistribution(sum=45535, max=1418, min=29, mean=455.35, median=413.5, p5=145, p95=846)\n", + "User output token length distribution: DatasetDistribution(sum=3819, max=165, min=8, mean=38.19, median=32.0, p5=17, p95=76)\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Cost Estimation for Supervised Fine-tuning\n", + "In this final section, you will estimate the total cost for 
supervised fine-tuning based on the number of tokens processed. The number of tokens used will be charged to you. Please refer to the [pricing page for the rate](https://cloud.google.com/vertex-ai/generative-ai/pricing#gemini-models).\n", + "\n", + "**Important Note:** The final cost may vary slightly from this estimate due to dataset formatting and truncation logic during training.\n", + "\n", + "The code calculates the total number of billable tokens by summing up the tokens from the training dataset and (if provided) the validation dataset. Then, it estimates the total cost by multiplying the total billable tokens with the number of training epochs (default is 4)." + ], + "metadata": { + "id": "KFWbXu17DfiS" + } + }, + { + "cell_type": "code", + "source": [ + "epoch_count = 4 # @param {type:\"integer\"}\n", + "if epoch_count is None:\n", + " epoch_count = 4\n", + "\n", + "\n", + "total_number_of_billable_tokens = training_dataset_stats.total_number_of_billable_tokens\n", + "\n", + "\n", + "if validation_dataset_stats:\n", + " total_number_of_billable_tokens += (\n", + " validation_dataset_stats.total_number_of_billable_tokens\n", + " )\n", + "\n", + "print(f\"Dataset has ~{total_number_of_billable_tokens} tokens that will be charged\")\n", + "print(f\"By default, you'll train for {epoch_count} epochs on this dataset.\")\n", + "print(\n", + " f\"By default, you'll be charged for ~{epoch_count * total_number_of_billable_tokens} tokens.\"\n", + ")" + ], + "metadata": { + "id": "k3ZJ_8fQ0R9x", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "9525fd37-0fc4-44dc-a97e-99a9e44748c0" + }, + "execution_count": 143, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Dataset has ~309397 tokens that will be charged\n", + "By default, you'll train for 4 epochs on this dataset.\n", + "By default, you'll be charged for ~1237588 tokens.\n" + ] + } + ] + }, + { + "cell_type": "markdown", "source": [ - "
\n", - "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", - "
\n" - ] + "## Convert `Gemini 1.0 Pro` fine-tuning dataset to `Gemini 1.5 Pro` dataset." + ], + "metadata": { + "id": "K1EMMeRfH14a" + } }, { - "cell_type": "markdown", + "cell_type": "code", + "source": [ + "source_uri = \"gs://next-23-tuning-demo/example-fine-tuning.json\" # @param {type:\"string\"}\n", + "destination_uri = \"gs://next-23-tuning-demo/new-data-format.jsonl\" # @param {type:\"string\"}\n", + "system_instruction = \"You are a helpful and friendly AI assistant\" # Optional" + ], "metadata": { - "id": "dmWOrTJ3gx13" + "id": "oOKZgdSLJUFx" }, - "source": [ - "### Authenticate your notebook environment (Colab only)\n", - "\n", - "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." - ] + "execution_count": 144, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NyKGtVQjgx13" - }, - "outputs": [], "source": [ - "import sys\n", + "def convert_jsonl_format(\n", + " source_uri: str,\n", + " destination_uri: str,\n", + " system_instruction: str = None,\n", + "):\n", + " \"\"\"Converts a JSONL file from the old format to the new format.\n", "\n", - "if \"google.colab\" in sys.modules:\n", - " from google.colab import auth\n", + " Args:\n", + " source_uri: Google Cloud Storage URI of the source JSONL file.\n", + " destination_uri: Google Cloud Storage URI for the new JSONL file.\n", + " system_instruction: Optional system instruction text.\n", + " If provided, it will be added as \"systemInstruction\" in the new format.\n", + " \"\"\"\n", + " storage_client = storage.Client()\n", "\n", - " auth.authenticate_user()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DF4l8DTdWgPY" - }, - "source": [ - "### Set Google Cloud project information and initialize Vertex AI SDK\n", + " # Extract bucket and file name from source URI\n", + " source_bucket_name, source_blob_name = extract_bucket_and_blob_name(source_uri)\n", + " source_bucket = 
storage_client.bucket(source_bucket_name)\n", + " source_blob = source_bucket.blob(source_blob_name)\n", "\n", - "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + " # Extract bucket and file name from destination URI\n", + " dest_bucket_name, dest_blob_name = extract_bucket_and_blob_name(destination_uri)\n", + " dest_bucket = storage_client.bucket(dest_bucket_name)\n", + " dest_blob = dest_bucket.blob(dest_blob_name)\n", "\n", - "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Nqwi-5ufWp_B" - }, - "outputs": [], - "source": [ - "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", - "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", + " # Download the source JSONL file\n", + " source_data = source_blob.download_as_string().decode('utf-8')\n", "\n", + " new_data = []\n", + " for line in source_data.splitlines():\n", + " try:\n", + " json_data = json.loads(line)\n", + " new_json_data = convert_json_object(json_data, system_instruction)\n", + " new_data.append(new_json_data)\n", + " except json.JSONDecodeError as e:\n", + " print(f\"Skipping invalid JSON line: {line} - Error: {e}\")\n", "\n", - "import vertexai\n", + " # Upload the new JSONL file\n", + " new_data_str = \"\\n\".join([json.dumps(data) for data in new_data])\n", + " dest_blob.upload_from_string(new_data_str)\n", "\n", - "vertexai.init(project=PROJECT_ID, location=LOCATION)" - ] - }, - { - "cell_type": "markdown", + " print(f\"Successfully converted and uploaded to {destination_uri}\")\n", + "\n", + "\n", + "def convert_json_object(json_data: dict, system_instruction: str = None) -> dict:\n", + " \"\"\"Converts a single JSON object from the old format to the new 
format.\n", + "\n", + " Args:\n", + " json_data: The JSON object to convert.\n", + " system_instruction: Optional system instruction text.\n", + "\n", + " Returns:\n", + " The converted JSON object.\n", + " \"\"\"\n", + " new_json_data = {} # Create an empty dict instead of initializing with \"contents\"\n", + "\n", + " if system_instruction:\n", + " new_json_data[\"systemInstruction\"] = {\n", + " \"role\": \"system\",\n", + " \"parts\": [{\"text\": system_instruction}]\n", + " }\n", + "\n", + " new_json_data[\"contents\"] = [] # Initialize \"contents\" after \"systemInstruction\"\n", + "\n", + " for message in json_data.get(\"messages\", []):\n", + " new_message = {\n", + " \"role\": message[\"role\"],\n", + " \"parts\": [{\"text\": message[\"content\"]}]\n", + " }\n", + " new_json_data[\"contents\"].append(new_message)\n", + "\n", + " return new_json_data\n", + "\n", + "\n", + "def extract_bucket_and_blob_name(gcs_uri: str) -> tuple:\n", + " \"\"\"Extracts the bucket name and blob name from a Google Cloud Storage URI.\n", + "\n", + " Args:\n", + " gcs_uri: The Google Cloud Storage URI (e.g., \"gs://my-bucket/my-file.jsonl\")\n", + "\n", + " Returns:\n", + " A tuple containing the bucket name and blob name.\n", + " \"\"\"\n", + " if not gcs_uri.startswith(\"gs://\"):\n", + " raise ValueError(\"Invalid Google Cloud Storage URI\")\n", + " parts = gcs_uri[5:].split(\"/\", 1)\n", + " return parts[0], parts[1]" + ], "metadata": { - "id": "EdvJRUWRNGHE" + "id": "fgNjg3Y4CSq8" }, - "source": [ - "## Tuning token count and cost estimation." 
- ]
+ "execution_count": 117,
+ "outputs": []
},
{
- "cell_type": "markdown",
+ "cell_type": "code",
+ "source": [
+ "convert_jsonl_format(source_uri, destination_uri, system_instruction)"
+ ],
"metadata": {
- "id": "cPhY560YQijW"
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "WAqrR4yDH1LT",
+ "outputId": "2e351e1c-ee6c-40e6-857f-581b12fe3872"
},
- "source": [
- "### Import libraries"
+ "execution_count": 118,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Successfully converted and uploaded to gs://next-23-tuning-demo/new-data-format.jsonl\n"
+ ]
+ }
]
},
{
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "id": "4498u5KpQijW"
- },
- "outputs": [],
+ "cell_type": "markdown",
"source": [
- "from collections import defaultdict\n",
- "import dataclasses\n",
- "import json\n",
+ "## Tuning token count and cost estimation for `Gemini 1.0 pro` legacy users.\n",
"\n",
- "import numpy as np\n",
- "import tensorflow as tf\n",
- "from vertexai.generative_models import Content, Part\n",
- "from vertexai.preview.tokenization import get_tokenizer_for_model"
- ]
+ "Only use this part if you still use `Gemini 1.0 pro`. It's best to upgrade to using [`gemini-1.5-pro-002`](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning)."
+ ], + "metadata": { + "id": "9k1GJaFIEvd-" + } }, { "cell_type": "markdown", @@ -303,7 +998,8 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "PTvunHqRTHqe" + "id": "PTvunHqRTHqe", + "outputId": "8d1aabc9-cf3b-4150-f768-c40d0d92c237" }, "outputs": [ { @@ -458,7 +1154,8 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "pUCpEmEFM0eX" + "id": "pUCpEmEFM0eX", + "outputId": "1bf39ccb-4898-4c44-9a6e-557e58694d7a" }, "outputs": [ { @@ -687,7 +1384,8 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "uOWsUbwVXoTU" + "id": "uOWsUbwVXoTU", + "outputId": "c644fa82-1de4-4ba5-f9cf-44f4232917ee" }, "outputs": [ { @@ -751,7 +1449,8 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "DVIpbaGYRJQc" + "id": "DVIpbaGYRJQc", + "outputId": "2e2f78cc-2005-4965-af26-a1cc5627e7ee" }, "outputs": [ { @@ -784,179 +1483,6 @@ " f\"By default, you'll be charged for ~{epoch_count * total_number_of_billable_tokens} tokens.\"\n", ")" ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "k35OK86wJVVd" - }, - "source": [ - "## Validate the dataset for Vertex AI Gemini 1.5 fine-tuning" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "KQWJhyaXQRNM" - }, - "outputs": [], - "source": [ - "BASE_MODEL = \"gemini-1.5-pro-002\" # @param ['gemini-1.5-pro-002']{type:\"string\"}\n", - "training_dataset_uri_2 = \"gs://github-repo/generative-ai/gemini/tuning/train_sft_train_samples.jsonl\" # @param {type:\"string\"}\n", - "validation_dataset_uri_2 = \"gs://github-repo/generative-ai/gemini/tuning/val_sft_val_samples.jsonl\" # @param {type:\"string\"}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "p-soJC81YNy2" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Num training examples: 500\n", - "First example:\n", - "{'role': 'user', 'parts': [{'text': 'Honesty is usually the best policy. 
It is disrespectful to lie to someone. If you don\\'t want to date someone, you should say so. Sometimes it is easy to be honest. For example, you might be able to truthfully say, \"No, thank you, I already have a date for that party.\" Other times, you might need to find a kinder way to be nice. Maybe you are not attracted to the person. Instead of bluntly saying that, try saying, \"No, thank you, I just don\\'t think we would be a good fit.\" Avoid making up a phony excuse. For instance, don\\'t tell someone you will be out of town this weekend if you won\\'t be. There\\'s a chance that you might then run into them at the movies, which would definitely cause hurt feelings. A compliment sandwich is a really effective way to provide feedback. Essentially, you \"sandwich\" your negative comment between two positive things. Try using this method when you need to reject someone. An example of a compliment sandwich is to say something such as, \"You\\'re an awesome person. Unfortunately, I\\'m not interested in dating you. Someone else is going to be really lucky to date someone with such a great personality!\" You could also try, \"You are a really nice person. I\\'m only interested you as a friend. I like when we hang out in big groups together!\" Be sincere. If you offer false compliments, the other person will likely be able to tell and feel hurt. If you do not want to date someone, it is best to be upfront about your feelings. Do not beat around the bush. If your mind is made up, it is best to clearly state your response. If someone asks you to date them and you don\\'t want to, you can be direct and kind at the same time. State your answer clearly. You can make your feelings clear without purposefully hurting someone else\\'s feelings. Try smiling and saying, \"That sounds fun, but no thank you. I\\'m not interested in dating you.\" Don\\'t beat around the bush. 
If you do not want to accept the date, there is no need to say, \"Let me think about it.\" It is best to get the rejection over with. You don\\'t want to give someone false hope. Avoid saying something like, \"Let me check my schedule and get back to you.\" Try to treat the person the way you would want to be treated. This means that you should choose your words carefully. Be thoughtful in your response. It\\'s okay to pause before responding. You might be taken by surprise and need a moment to collect your thoughts. Say thank you. It is a compliment to be asked out. You can say, \"I\\'m flattered. Unfortunately, I can\\'t accept.\" Don\\'t laugh. Many people laugh nervously in awkward situations. Try to avoid giggling, as that is likely to result in hurt feelings. Sometimes it is not what you say, but how you say it. If you need to reject someone, think about factors other than your words. Non-verbal communication matters, too. Use the right tone of voice. Try to sound gentle but firm. Make eye contact. This helps convey that you are being serious, and also shows respect for the other person. If you are in public, try not to speak too loudly. It is not necessary for everyone around you to know that you are turning down a date.\\n\\nProvide a summary of the article in two or three sentences:\\n\\n'}]}\n", - "{'role': 'model', 'parts': [{'text': 'Tell the truth. Use a \"compliment sandwich\". Be direct. Treat the person with respect. 
Communicate effectively.'}]}\n", - "Num validation examples: 100\n" - ] - } - ], - "source": [ - "with tf.io.gfile.GFile(training_dataset_uri_2) as dataset_jsonl_file:\n", - " example_training_dataset = [\n", - " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", - " ]\n", - "\n", - "print()\n", - "\n", - "if validation_dataset_uri:\n", - " with tf.io.gfile.GFile(validation_dataset_uri_2) as dataset_jsonl_file:\n", - " example_validation_dataset = [\n", - " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", - " ]\n", - "\n", - "# Initial dataset stats\n", - "print(\"Num training examples:\", len(example_training_dataset))\n", - "print(\"First example:\")\n", - "for message in example_training_dataset[0][\"contents\"]:\n", - " print(message)\n", - "\n", - "if example_validation_dataset:\n", - " print(\"Num validation examples:\", len(example_validation_dataset))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "K0YOqIbtXwGI" - }, - "outputs": [], - "source": [ - "def validate_dataset_format(dataset):\n", - " \"\"\"Validates the dataset.\n", - "\n", - " Args:\n", - " dataset_uri: The dataset uri to be validated.\n", - " \"\"\"\n", - " format_errors = defaultdict(list)\n", - " if not dataset or len(dataset) == 0:\n", - " print(\"Input dataset file is empty or inaccessible.\")\n", - " return\n", - "\n", - " for row_idx, example in enumerate(dataset):\n", - " # Verify presence of contents list\n", - " if not isinstance(example, dict):\n", - " format_errors[\"invalid_input\"].append(row_idx)\n", - " continue\n", - " contents = example.get(\"contents\", None)\n", - " if not contents:\n", - " format_errors[\"missing_contents_list\"].append(row_idx)\n", - " continue\n", - " try:\n", - " validate_contents(contents, format_errors, row_idx)\n", - " except (TypeError, AttributeError, KeyError) as e:\n", - " print(\"Invalid input during validation: %s\", e)\n", - " 
format_errors[\"invalid_input\"].append(row_idx)\n", - "\n", - " if format_errors:\n", - " print(\"Found errors for this dataset:\")\n", - " for k, v in format_errors.items():\n", - " print(f\"{k}: {v}\")\n", - " else:\n", - " print(\"No errors found for this dataset.\")\n", - "\n", - "\n", - "def validate_contents(contents, format_errors, row_index):\n", - " \"\"\"Validates contents list format.\"\"\"\n", - " if not isinstance(contents, list):\n", - " format_errors[\"invalid_contents_list\"].append(row_index)\n", - " return\n", - "\n", - " prev_role = None\n", - " for content_item in contents:\n", - " if not isinstance(content_item, dict):\n", - " format_errors[\"invalid_content_item\"].append(row_index)\n", - " return\n", - "\n", - " if \"role\" not in content_item or \"parts\" not in content_item:\n", - " format_errors[\"content_item_missing_key\"].append(row_index)\n", - " return\n", - "\n", - " if content_item.get(\"role\", \"\").lower() not in (\"user\", \"model\"):\n", - " format_errors[\"unrecognized_role\"].append(row_index)\n", - " return\n", - "\n", - " parts = content_item.get(\"parts\", None)\n", - " if not parts or not isinstance(parts, list):\n", - " format_errors[\"missing_or_invalid_parts\"].append(row_index)\n", - " return\n", - "\n", - " for part in parts:\n", - " if not isinstance(part, dict) or \"text\" not in part:\n", - " format_errors[\"invalid_part\"].append(row_index)\n", - " return\n", - "\n", - " if not part.get(\"text\"):\n", - " format_errors[\"missing_text\"].append(row_index)\n", - " return\n", - "\n", - " role = content_item.get(\"role\", \"\").lower()\n", - " # messages to have alternate turns.\n", - " if role == prev_role:\n", - " format_errors[\"consecutive_turns\"].append(row_index)\n", - " return\n", - "\n", - " prev_role = role" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "RppMRP9lIkq2" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No errors 
found for this dataset.\n", - "No errors found for this dataset.\n" - ] - } - ], - "source": [ - "validate_dataset_format(example_training_dataset)\n", - "if example_validation_dataset:\n", - " validate_dataset_format(example_validation_dataset)" - ] } ], "metadata": { @@ -966,8 +1492,8 @@ "dmWOrTJ3gx13", "DF4l8DTdWgPY" ], - "name": "vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb", - "toc_visible": true + "toc_visible": true, + "provenance": [] }, "kernelspec": { "display_name": "Python 3", @@ -976,4 +1502,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From fcd3bb11df59b0110d23f8c5e2e1cd980e1d26f8 Mon Sep 17 00:00:00 2001 From: Deepak moonat Date: Wed, 2 Oct 2024 07:11:23 +0530 Subject: [PATCH 32/76] feat: add gemini supervised finetuning on image data notebook (#1181) # Description Notebook showcasing how to do supervised finetuning using gemini1.5-pro-002 on image data - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot Co-authored-by: Erwin Huizenga --- ...inetuning_using_gemini_on_image_data.ipynb | 1934 +++++++++++++++++ ...ning_token_count_and_cost_estimation.ipynb | 355 ++- 2 files changed, 2106 insertions(+), 183 deletions(-) create mode 100644 gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb diff --git a/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb new file mode 100644 index 0000000000..a9cf0b47bb --- /dev/null +++ b/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb @@ -0,0 +1,1934 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "9f0d0f32-23b4-41a6-b364-579da297c326" + }, + "outputs": [], + "source": [ + "# @title Copyright & License (click to expand)\n", + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dd53d60c-97eb-4c72-91ea-f274a753ab34" + }, + "source": [ + "# Supervised Fine Tuning with Gemini 1.5 Pro for Image Captioning\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MgVK7IeKpW27" + }, + "source": [ + "| | | |\n", + "|-|-|-|\n", + "|Author(s) | [Deepak Moonat](https://github.com/dmoonat)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9ef820fb-1203-4cab-965f-17093a4ba25e" + }, + "source": [ + "## Overview\n", + "\n", + "**Gemini** is a family of generative AI models developed by Google DeepMind that is designed for multimodal use cases. The Gemini API gives you access to the various Gemini models, such as Gemini 1.5 Pro, Gemini 1.0 Pro and more.\n", + "\n", + "This notebook demonstrates how to fine-tune the Gemini 1.5 Pro Model for Vision task using the Vertex AI Supervised Tuning feature. Supervised Tuning allows you to use your own training data to further refine the base model's capabilities towards your specific tasks.\n", + "\n", + "\n", + "Supervised Tuning uses labeled examples to tune a model. Each example demonstrates the output you want from your text model during inference.\n", + "\n", + "First, ensure your training data is of high quality, well-labeled, and directly relevant to the target task. 
This is crucial as low-quality data can adversely affect the performance and introduce bias in the fine-tuned model.\n", + "- Training: Experiment with different configurations to optimize the model's performance on the target task.\n", + "- Evaluation:\n", + " - Metric: Choose appropriate evaluation metrics that accurately reflect the success of the fine-tuned model for your specific task\n", + " - Evaluation Set: Use a separate set of data to evaluate the model's performance" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "74b00940-376c-4056-90fb-d22c1ce6eedf" + }, + "source": [ + "### Objective\n", + "\n", + "In this tutorial, you will learn how to use `Vertex AI` to tune a `gemini-1.5-pro-002` model on image data.\n", + "\n", + "\n", + "This tutorial uses the following Google Cloud ML services:\n", + "\n", + "- `Vertex AI`\n", + "\n", + "\n", + "The steps performed include:\n", + "\n", + "- Prepare and load the dataset\n", + "- Load the `gemini-1.5-pro-002` model\n", + "- Evaluate the model before tuning\n", + "- Tune the model.\n", + " - This will automatically create a Vertex AI endpoint and deploy the model to it\n", + "- Evaluate the model after tuning\n", + "- Make a prediction using tuned model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c6b43693-b20a-41bd-b5b8-5ad414517162" + }, + "source": [ + "### Model\n", + "\n", + "The pre-trained LLM model is `gemini-1.5-pro-002` for text generation." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "X0xdTMs10K7y" + }, + "source": [ + "### Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jCMczwd00N9T" + }, + "source": [ + "Dataset used in this notebook is about image captioning.\n", + "\n", + "[Reference](https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma#download_the_model_checkpoint)\n", + "\n", + "Licensed under the Creative Commons Attribution 4.0 License" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6d7b5435-e947-49bb-9ce3-aa8a42c30118" + }, + "source": [ + "### Costs\n", + "\n", + "This tutorial uses billable components of Google Cloud:\n", + "\n", + "* Vertex AI\n", + "* Cloud Storage\n", + "\n", + "Learn about [Vertex AI\n", + "pricing](https://cloud.google.com/vertex-ai/pricing), [Cloud Storage\n", + "pricing](https://cloud.google.com/storage/pricing), and use the [Pricing\n", + "Calculator](https://cloud.google.com/products/calculator/)\n", + "to generate a cost estimate based on your projected usage." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0cbf01f0-5f6e-4bcd-903f-84ccaad5332c" + }, + "source": [ + "## Installation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MpDAgOsK6kZn" + }, + "outputs": [], + "source": [ + "! pip3 install --upgrade --user --quiet google-cloud-aiplatform jsonlines rouge_score" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Moror1y0Qq2z" + }, + "source": [ + "### Restart runtime (Colab only)\n", + "\n", + "To use the newly installed packages, you must restart the runtime on Google Colab." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4KLm_nKmQtC8" + }, + "outputs": [], + "source": [ + "# Automatically restart kernel after installs so that your environment can access the new packages\n", + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " import IPython\n", + "\n", + " app = IPython.Application.instance()\n", + " app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dpSnJTbIrFsh" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Please wait until it is finished before continuing to the next step. ⚠️\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b37d4259-7e39-417b-8879-24f7575732c8" + }, + "source": [ + "## Before you begin\n", + "\n", + "### Set your project ID\n", + "\n", + "**If you don't know your project ID**, try the following:\n", + "* Run `gcloud config list`.\n", + "* Run `gcloud projects list`.\n", + "* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "caaf0d7e-c6cb-4e56-af5c-553db5180e00" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"[YOUR_PROJECT_ID]\" # @param {type:\"string\"}\n", + "# Set the project id\n", + "! gcloud config set project {PROJECT_ID}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "054d794d-cd2e-4280-95ac-859b264ea2d6" + }, + "source": [ + "#### Region\n", + "\n", + "You can also change the `REGION` variable used by Vertex AI. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0121bf60-1acd-4272-afaf-aa54b4ded263" + }, + "outputs": [], + "source": [ + "REGION = \"us-central1\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "czjH2JfKaGfH" + }, + "source": [ + "#### Bucket\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c_iZzYtraF3y" + }, + "outputs": [], + "source": [ + "BUCKET_NAME = \"[YOUR_BUCKET_NAME]\" # @param {type:\"string\"}\n", + "BUCKET_URI = f\"gs://{BUCKET_NAME}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eac9e842-d225-4876-836f-afdb1937d800" + }, + "source": [ + "### Authenticate your Google Cloud account\n", + "\n", + "Depending on your Jupyter environment, you may have to manually authenticate. Follow the relevant instructions below.\n", + "\n", + "**1. 
Vertex AI Workbench**\n", + "* Do nothing as you are already authenticated.\n", + "\n", + "**2. Local JupyterLab instance, uncomment and run:**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "23082eec-b1bd-4594-b5b5-56fe2b74db6f" + }, + "outputs": [], + "source": [ + "# ! gcloud auth login" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3c20f923-3c46-4d6d-80d2-d7cb22b1a8da" + }, + "source": [ + "**3. Authenticate your notebook environment**\n", + "\n", + "If you are running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "60302a3f-fad9-452c-8998-a9c9822d2732" + }, + "outputs": [], + "source": [ + "from google.colab import auth\n", + "\n", + "auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ac33116d-b079-46cb-9614-86326c211e00" + }, + "source": [ + "**4. Service account or other**\n", + "* See how to grant Cloud Storage permissions to your service account at https://cloud.google.com/storage/docs/gsutil/commands/iam#ch-examples." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e6a924d0-a034-4e53-b240-03d356c7b7a6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "463729ba-ec3c-4302-95bf-80207b0f9e2d" + }, + "outputs": [], + "source": [ + "import io\n", + "import time\n", + "\n", + "# For visualization.\n", + "from PIL import Image\n", + "\n", + "# For google cloud storage service.\n", + "from google.cloud import storage\n", + "\n", + "# For fine tuning Gemini model.\n", + "import google.cloud.aiplatform as aiplatform\n", + "\n", + "# For data handling.\n", + "import jsonlines\n", + "import pandas as pd\n", + "\n", + "# For evaluation.\n", + "from rouge_score import rouge_scorer\n", + "from tqdm import tqdm\n", + "from vertexai.preview.generative_models import GenerationConfig, GenerativeModel, Part\n", + "from vertexai.preview.tuning import sft" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a522acfe-d0b6-4b4e-b201-0a4ccf59b133" + }, + "source": [ + "## Initialize Vertex AI SDK for Python\n", + "\n", + "Initialize the Vertex AI SDK for Python for your project and corresponding bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c845aca6-4f72-4d3b-b9ed-de4a18fcbbf8" + }, + "outputs": [], + "source": [ + "aiplatform.init(project=PROJECT_ID, location=REGION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "okht6CExcw4d" + }, + "source": [ + "## Prepare Multimodal Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8N1QCz0MzyD6" + }, + "source": [ + "The dataset used to tune a foundation model needs to include examples that align with the task that you want the model to perform." 
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9yp9SQ1M7FSP"
+ },
+ "source": [
+ "Note:\n",
+ "- Only supports images and text as input, and text only as output.\n",
+ "- Maximum 16 images per tuning example.\n",
+ "- Maximum image file size: 20MB\n",
+ "- Image needs to be in `jpeg` or `png` format. Supported mimetypes: `image/jpeg` and `image/png`\n",
+ "\n",
+ "Input is a jsonl file with each json string being on one line.\n",
+ "Each json instance has the format (Expanded for clarity):\n",
+ "```\n",
+ "{\n",
+ " \"contents\":[\n",
+ " {\n",
+ " \"role\":\"user\", # This indicates the input content\n",
+ " \"parts\":[ # Interleaved image and text, could be in any order.\n",
+ " {\n",
+ " \"fileData\":{ # FileData needs to be a reference to an image file in GCS. No inline data.\n",
+ " \"mimeType\":\"image/jpeg\", # Provide the mimeType of this image\n",
+ " \"fileUri\":\"gs://path/to/image_uri\"\n",
+ " }\n",
+ " },\n",
+ " {\n",
+ " \"text\":\"What is in this image?\"\n",
+ " }\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"role\":\"model\", # This indicates the target content\n",
+ " \"parts\":[ # text only\n",
+ " {\n",
+ " \"text\":\"Something about this image.\"\n",
+ " }\n",
+ " ]\n",
+ " } # Single turn input and response.\n",
+ " ]\n",
+ "}\n",
+ "```\n",
+ "\n",
+ "Example:\n",
+ "```\n",
+ "{\n",
+ " \"contents\":[\n",
+ " {\n",
+ " \"role\":\"user\",\n",
+ " \"parts\":[\n",
+ " {\n",
+ " \"fileData\":{\n",
+ " \"mimeType\":\"image/jpeg\",\n",
+ " \"fileUri\":\"gs://bucketname/data/vision_data/task/image_description/image/1.jpeg\"\n",
+ " }\n",
+ " },\n",
+ " {\n",
+ " \"text\":\"Describe this image that captures the essence of it.\"\n",
+ " }\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"role\":\"model\",\n",
+ " \"parts\":[\n",
+ " {\n",
+ " \"text\":\"A person wearing a pink shirt and a long-sleeved shirt with a large cuff, ....\"\n",
+ " }\n",
+ " ]\n",
+ " }\n",
+ " ]\n",
+ "}\n",
+ "```\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ 
"id": "DESw8v4QrLHR" + }, + "source": [ + "### Data files\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uiTVJqMXTvM5" + }, + "source": [ + "Data used in this notebook is present in the public Cloud Storage(GCS) bucket, `gs://longcap100`.\n", + "\n", + "Sample:\n", + "\n", + "> {\"prefix\": \"\", \"suffix\": \"A person wearing a pink shirt and a long-sleeved shirt with a large cuff, has their hand on a concrete ledge. The hand is on the edge of the ledge, and the thumb is on the edge of the hand. The shirt has a large cuff, and the sleeve is rolled up. The shadow of the hand is on the wall.\", \"image\": \"91.jpeg\"}\n", + "\n", + "\n", + "\n", + "- `data_train90.jsonl`: Contains training samples in json lines as shown above\n", + "- `data_val10.jsonl`: Contains validation samples in json lines as shown above\n", + "- `images`: Contains 100 images, training and validation data" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MLcuIXlzz36C" + }, + "source": [ + "To run a tuning job, you need to upload one or more datasets to a Cloud Storage bucket. You can either create a new Cloud Storage bucket or use an existing one to store dataset files. The region of the bucket doesn't matter, but we recommend that you use a bucket that's in the same Google Cloud project where you plan to tune your model." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sfIUgj-mU8K9" + }, + "source": [ + "### Create a Cloud Storage bucket" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T_uC6nuFU-XU" + }, + "source": [ + "- Create a storage bucket to store intermediate artifacts such as datasets.\n", + "\n", + "- Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "M-L1BH8TU9Gn" + }, + "outputs": [], + "source": [ + "!gsutil mb -l {REGION} -p {PROJECT_ID} {BUCKET_URI}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZUGi7ZThbChr" + }, + "source": [ + "### Copy images to specified Bucket" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DHdC-9nj071o" + }, + "outputs": [], + "source": [ + "!gsutil -m -q cp -n -r gs://longcap100/*.jpeg {BUCKET_URI}/images/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fpyJR6tlVRXh" + }, + "source": [ + "- Download the training and validation dataset jsonlines files from the bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "peUixIt_2DLP" + }, + "outputs": [], + "source": [ + "!gsutil -m -q cp -n -r gs://longcap100/data_train90.jsonl ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rtXMRqAi1WiF" + }, + "outputs": [], + "source": [ + "!gsutil -m -q cp -n -r gs://longcap100/data_val10.jsonl ." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a9N-rN7pECKa" + }, + "source": [ + "### Prepare dataset for Training and Evaluation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KEfGLRVfsrii" + }, + "source": [ + "- Utility function to save json instances into jsonlines format" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zdVGCwFWsrCB" + }, + "outputs": [], + "source": [ + "def save_jsonlines(file, instances):\n", + " \"\"\"\n", + " Saves a list of json instances to a jsonlines file.\n", + " \"\"\"\n", + " with jsonlines.open(file, mode=\"w\") as writer:\n", + " writer.write_all(instances)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-hMIYgYBsbUt" + }, + "source": [ + "- Below function converts the dataset into Gemini-1.5 tuning format" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0TFcj_tjaALV" + }, + "outputs": [], + "source": [ + "task_prompt = \"Describe this image in detail that captures the essence of it.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LZ1cauVkz8Vv" + }, + "outputs": [], + "source": [ + "def create_tuning_samples(file_path):\n", + " \"\"\"\n", + " Creates tuning samples from a file.\n", + " \"\"\"\n", + " with jsonlines.open(file_path) as reader:\n", + " instances = []\n", + " for obj in reader:\n", + " instance = {\n", + " \"contents\": [\n", + " {\n", + " \"role\": \"user\", # This indicate input content\n", + " \"parts\": [ # Interleaved image and text, could be in any order.\n", + " {\n", + " \"fileData\": { # FileData needs to be reference to image file in gcs. 
No inline data.\n", + " \"mimeType\": \"image/jpeg\", # Provide the mimeType about this image\n", + " \"fileUri\": f\"{BUCKET_URI}/images/{obj['image']}\",\n", + " }\n", + " },\n", + " {\"text\": task_prompt},\n", + " ],\n", + " },\n", + " {\n", + " \"role\": \"model\", # This indicate target content\n", + " \"parts\": [{\"text\": obj[\"suffix\"]}], # text only\n", + " }, # Single turn input and response.\n", + " ]\n", + " }\n", + " instances.append(instance)\n", + " return instances" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tqh6WYHg6X4z" + }, + "source": [ + "- Training data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "b685Iy27z1E1" + }, + "outputs": [], + "source": [ + "train_file_path = \"data_train90.jsonl\"\n", + "train_instances = create_tuning_samples(train_file_path)\n", + "# save the training instances to jsonl file\n", + "save_jsonlines(\"train.jsonl\", train_instances)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UC4ULRC46mA-" + }, + "outputs": [], + "source": [ + "train_instances[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nyn5Xgw41bhc" + }, + "outputs": [], + "source": [ + "# save the training data to gcs bucket\n", + "!gsutil cp train.jsonl {BUCKET_URI}/train/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HLsC3IBL6ZWk" + }, + "source": [ + "- Validation data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LIp0hdag6bS0" + }, + "outputs": [], + "source": [ + "val_file_path = \"data_val10.jsonl\"\n", + "val_instances = create_tuning_samples(val_file_path)\n", + "# save the training instances to jsonl file\n", + "save_jsonlines(\"val.jsonl\", val_instances)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "TBTBTx4n6koL" + }, + "outputs": [], + "source": [ + "val_instances[0]" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xy-6ihNR6gx3" + }, + "outputs": [], + "source": [ + "# save the validation data to gcs bucket\n", + "!gsutil cp val.jsonl {BUCKET_URI}/val/" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QhejcJumTAj3" + }, + "source": [ + "- Below code transforms the jsonl format to following structure\n", + "\n", + "`\n", + "[{'file_uri': '',\n", + " 'ground_truth': 'Experiment with different parameter values to get the best values for the task\n", + "\n", + "Refer to the following [link](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/adjust-parameter-values) for understanding different parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zUx23W_r0F8z" + }, + "source": [ + "**Prompt** is a natural language request submitted to a language model to receive a response back\n", + "\n", + "Some best practices include\n", + " - Clearly communicate what content or information is most important\n", + " - Structure the prompt:\n", + " - Defining the role if using one. For example, You are an experienced UX designer at a top tech company\n", + " - Include context and input data\n", + " - Provide the instructions to the model\n", + " - Add example(s) if you are using them\n", + "\n", + "Refer to the following [link](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-design-strategies) for prompt design strategies." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uuKHRy2OVX0w" + }, + "source": [ + "### Task" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "U-YD1J3VTSoI" + }, + "source": [ + "***Task prompt:***\n", + "\n", + "`\n", + "\", Describe this image that captures the essence of it. 
\"\n", + "`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zTZS4IJMTVR1" + }, + "source": [ + "***Query Image (image)***\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-Ry2IjT2TWwd" + }, + "outputs": [], + "source": [ + "query_image_uri = val_instances[0][\"contents\"][0][\"parts\"][0][\"fileData\"][\"fileUri\"]\n", + "blob_name = query_image_uri.replace(f\"{BUCKET_URI}/\", \"\")\n", + "img = read_image_bytes_from_gcs(BUCKET_NAME, blob_name)\n", + "\n", + "# Display image bytes using pil python library\n", + "image = Image.open(io.BytesIO(img))\n", + "resized_img = image.resize((300, 300))\n", + "display(resized_img)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "04lAlLK53IYS" + }, + "source": [ + "- Test on single instance" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-MeiP8z-o6qt" + }, + "outputs": [], + "source": [ + "response = generation_model.generate_content(\n", + " contents=[\n", + " Part.from_uri(query_image_uri, \"image/jpeg\"),\n", + " \"Describe this image that captures the essence of it.\",\n", + " ],\n", + " # Optional config\n", + " generation_config=GenerationConfig(\n", + " temperature=0.0,\n", + " ),\n", + ")\n", + "\n", + "print(response.text.strip())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5LISwh5_4R1U" + }, + "source": [ + "- Ground truth" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aGXbUVK-3lO5" + }, + "outputs": [], + "source": [ + "val_instances[0][\"contents\"][1][\"parts\"][0][\"text\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MRVAwGLB6KUX" + }, + "source": [ + "- Change prompt to get detailed description for the provided image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JO-C5BAVsdfd" + }, + "outputs": [], + "source": [ + "response = generation_model.generate_content(\n", + " 
contents=[\n", + " Part.from_uri(query_image_uri, \"image/jpeg\"),\n", + " \"Describe this image in detail that captures the essence of it.\",\n", + " ],\n", + " # Optional config\n", + " generation_config=GenerationConfig(\n", + " temperature=0.0,\n", + " ),\n", + ")\n", + "\n", + "print(response.text.strip())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "snYSjdzCVjGA" + }, + "source": [ + "## Evaluation before model tuning" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vVvGqqTSVzUZ" + }, + "source": [ + "- Evaluate the Gemini model on the validation dataset before tuning it on the training dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "otIRm3XBwQnW" + }, + "outputs": [], + "source": [ + "def get_prediction(query_image, generation_model):\n", + " \"\"\"Gets the prediction for a given instance.\n", + "\n", + " Args:\n", + " query_image: The path to the query image.\n", + " candidates: A list of paths to the candidate images.\n", + " generation_model: The generation model to use for prediction.\n", + "\n", + " Returns:\n", + " A string containing the prediction.\n", + " \"\"\"\n", + " response = generation_model.generate_content(\n", + " contents=[Part.from_uri(query_image, \"image/jpeg\"), task_prompt],\n", + " # Optional config\n", + " generation_config=GenerationConfig(\n", + " temperature=0.0,\n", + " ),\n", + " )\n", + "\n", + " return response.text.strip()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rRW5UVau3xfO" + }, + "outputs": [], + "source": [ + "def run_eval(val_df, model=generation_model):\n", + " \"\"\"Runs evaluation on the validation dataset.\n", + "\n", + " Args:\n", + " val_df: The validation dataframe.\n", + " generation_model: The generation model to use for evaluation.\n", + "\n", + " Returns:\n", + " A list of predictions on val_df.\n", + " \"\"\"\n", + " predictions = []\n", + " for i, row in 
tqdm(val_df.iterrows(), total=val_df.shape[0]):\n", + " try:\n", + " prediction = get_prediction(row[\"file_uri\"], model)\n", + " except:\n", + " time.sleep(30)\n", + " prediction = get_prediction(row[\"file_uri\"], model)\n", + " predictions.append(prediction)\n", + " time.sleep(1)\n", + " return predictions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "29O4EccbqbIa" + }, + "source": [ + "- Evaluate the Gemini model on the test dataset before tuning it on the training dataset.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0LunPnr5Tvce" + }, + "source": [ + "
\n", + "⚠️ It will take 1-2 mins for the model to generate predictions on the provided validation dataset. ⚠️\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Y2Uy75youUor" + }, + "outputs": [], + "source": [ + "%%time\n", + "predictions = run_eval(val_df, model=generation_model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7BOg0EZpgg3D" + }, + "outputs": [], + "source": [ + "len(predictions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "N22X-_V5mlev" + }, + "outputs": [], + "source": [ + "val_df.loc[:, \"basePredictions\"] = predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bzA_YLSQ67Jc" + }, + "outputs": [], + "source": [ + "val_df" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nbPYwzNVWgz-" + }, + "source": [ + "### Evaluation metric" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mvqIYHNCWigP" + }, + "source": [ + "The type of metrics used for evaluation depends on the task that you are evaluating. The following table shows the supported tasks and the metrics used to evaluate each task:\n", + "\n", + "| Task | Metric(s) |\n", + "|-----------------|---------------------------------|\n", + "| Classification | Micro-F1, Macro-F1, Per class F1 |\n", + "| Summarization | ROUGE-L |\n", + "| Question Answering | Exact Match |\n", + "| Text Generation | BLEU, ROUGE-L |\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BTkLeYDJWre1" + }, + "source": [ + "For this task, we'll using ROUGE metric.\n", + "\n", + "- **Recall-Oriented Understudy for Gisting Evaluation (ROUGE)**: A metric used to evaluate the quality of automatic summaries of text. It works by comparing a generated summary to a set of reference summaries created by humans." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TIlOr8KFWzqt" + }, + "source": [ + "Now you can take the candidate and reference to evaluate the performance. 
In this case, ROUGE will give you:\n", + "\n", + "- `rouge-1`, which measures unigram overlap\n", + "- `rouge-2`, which measures bigram overlap\n", + "- `rouge-l`, which measures the longest common subsequence" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sIVb60EaW2oW" + }, + "source": [ + "- *Recall vs. Precision*\n", + "\n", + " **Recall**, meaning it prioritizes how much of the information in the reference summaries is captured in the generated summary.\n", + "\n", + " **Precision**, which measures how much of the generated summary is relevant to the original text." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rDwfndw9OAW9" + }, + "source": [ + "- Initialize `rouge_score` object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1SEVHIrk69kj" + }, + "outputs": [], + "source": [ + "scorer = rouge_scorer.RougeScorer([\"rouge1\", \"rouge2\", \"rougeL\"], use_stemmer=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_X9vv_gMORkr" + }, + "source": [ + "- Define function to calculate rouge score" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "P6C6EkvFOQzW" + }, + "outputs": [], + "source": [ + "def get_rouge_score(groundTruth, prediction):\n", + " \"\"\"Function to compute rouge score.\n", + "\n", + " Args:\n", + " groundTruth: The ground truth text.\n", + " prediction: The predicted text.\n", + " Returns:\n", + " The rouge score.\n", + " \"\"\"\n", + " scores = scorer.score(target=groundTruth, prediction=prediction)\n", + " return scores" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J6qBe-Mbtem_" + }, + "source": [ + "- Single instance evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BtP0f3GO7zG7" + }, + "outputs": [], + "source": [ + "get_rouge_score(val_df.loc[0, \"ground_truth\"], val_df.loc[0, \"basePredictions\"])" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "3zl1PpGA9oWE" + }, + "outputs": [], + "source": [ + "def calculate_metrics(val_df, prediction_col=\"basePredictions\"):\n", + " \"\"\"Function to compute rouge scores for all instances in the validation dataset.\n", + " Args:\n", + " val_df: The validation dataframe.\n", + " prediction_col: The column name of the predictions.\n", + " Returns:\n", + " A dataframe containing the rouge scores.\n", + " \"\"\"\n", + " records = []\n", + " for row, instance in val_df.iterrows():\n", + " scores = get_rouge_score(instance[\"ground_truth\"], instance[prediction_col])\n", + " records.append(\n", + " {\n", + " \"rouge1_precision\": scores.get(\"rouge1\").precision,\n", + " \"rouge1_recall\": scores.get(\"rouge1\").recall,\n", + " \"rouge1_fmeasure\": scores.get(\"rouge1\").fmeasure,\n", + " \"rouge2_precision\": scores.get(\"rouge2\").precision,\n", + " \"rouge2_recall\": scores.get(\"rouge2\").recall,\n", + " \"rouge2_fmeasure\": scores.get(\"rouge2\").fmeasure,\n", + " \"rougeL_precision\": scores.get(\"rougeL\").precision,\n", + " \"rougeL_recall\": scores.get(\"rougeL\").recall,\n", + " \"rougeL_fmeasure\": scores.get(\"rougeL\").fmeasure,\n", + " }\n", + " )\n", + " metrics = pd.DataFrame(records)\n", + " return metrics" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SS2UrB9g8NBt" + }, + "outputs": [], + "source": [ + "evaluation_df_stats = calculate_metrics(val_df, prediction_col=\"basePredictions\")\n", + "evaluation_df_stats" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZEyRYhEBZwy9" + }, + "outputs": [], + "source": [ + "print(\"Mean rougeL_precision is\", evaluation_df_stats.rougeL_precision.mean())\n", + "print(\"Mean rougeL_recall is\", evaluation_df_stats.rougeL_recall.mean())\n", + "print(\"Mean rougeL_fmeasure is\", evaluation_df_stats.rougeL_fmeasure.mean())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"uYAjjpdG_cpP" + }, + "source": [ + "## Fine-tune the model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EyqBRoY5rscI" + }, + "source": [ + "- `source_model`: Specifies the base Gemini model version you want to fine-tune.\n", + "- `train_dataset`: Path to your training data in JSONL format.\n", + "\n", + "
\n", + "\n", + " *Optional parameters*\n", + " - `validation_dataset`: If provided, this data is used to evaluate the model during tuning.\n", + " - `tuned_model_display_name`: Display name for the tuned model.\n", + " - `epochs`: The number of training epochs to run.\n", + " - `learning_rate_multiplier`: A value to scale the learning rate during training.\n", + " - `adapter_size` : Gemini 1.5 Pro supports Adapter length [1, 4], default value is 4.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UJ0gxBeyqO9k" + }, + "source": [ + "**Note: The default hyperparameter settings are optimized for optimal performance based on rigorous testing and are recommended for initial use. Users may customize these parameters to address specific performance requirements.**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_vbe8o4_8qV6" + }, + "outputs": [], + "source": [ + "tuned_model_display_name = \"[DISPLAY NAME FOR TUNED MODEL]\" # @param {type:\"string\"}\n", + "\n", + "sft_tuning_job = sft.train(\n", + " source_model=base_model,\n", + " train_dataset=f\"{BUCKET_URI}/train/train.jsonl\",\n", + " # Optional:\n", + " validation_dataset=f\"{BUCKET_URI}/val/val.jsonl\",\n", + " tuned_model_display_name=tuned_model_display_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hSDpQGUeERcH" + }, + "outputs": [], + "source": [ + "job_name = sft_tuning_job.to_dict()[\"name\"]\n", + "job_name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dYQgzOr7KciG" + }, + "outputs": [], + "source": [ + "sft_tuning_job.to_dict()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RE1a3AgRsqJh" + }, + "source": [ + "**Note: Tuning time depends on several factors, such as training data size, number of epochs, learning rate multiplier, etc.**" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qHlfSLjKsruX" + }, + "source": [ 
+ "
\n", + "⚠️ It will take ~60mins for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "97EUpJwisv_Q" + }, + "outputs": [], + "source": [ + "%%time\n", + "# Wait for job completion\n", + "while not sft_tuning_job.refresh().has_ended:\n", + " time.sleep(60)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c-72sxLLgR1O" + }, + "outputs": [], + "source": [ + "sft_tuning_job.to_dict()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5680557f-67bd-4e8c-a383-02ab655246c5" + }, + "source": [ + "## Evaluation Post-tuning" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c3d1f75bddea" + }, + "source": [ + "- Evaluate the Gemini model on the validation dataset with tuned model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bK2Cyrhavw-Y" + }, + "outputs": [], + "source": [ + "tuning_job = sft.SupervisedTuningJob(job_name)\n", + "\n", + "# tuned model endpoint name\n", + "tuned_model_endpoint_name = tuning_job.tuned_model_endpoint_name\n", + "\n", + "tuned_model = GenerativeModel(tuned_model_endpoint_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "l4ZnefpvwFeN" + }, + "outputs": [], + "source": [ + "# Get experiment resource name from tuning job.\n", + "experiment_name = tuning_job.experiment.resource_name\n", + "experiment_name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "M1Ce0oIFwtoN" + }, + "outputs": [], + "source": [ + "# tuned model name\n", + "tuned_model_name = tuning_job.tuned_model_name\n", + "tuned_model_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w2OoRp4OuUlC" + }, + "source": [ + "- Get a prediction from base model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Bp4yHwjNJbLQ" + }, + "outputs": [], + "source": [ + "response = generation_model.generate_content(\n", + " 
contents=[Part.from_uri(query_image_uri, \"image/jpeg\"), task_prompt],\n", + " # Optional config\n", + " generation_config=GenerationConfig(\n", + " temperature=0.0,\n", + " ),\n", + ")\n", + "\n", + "print(response.text.strip())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oVEEGZ-cuYx2" + }, + "source": [ + "- Get a prediction from tuned model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VxjhXCupNkR9" + }, + "outputs": [], + "source": [ + "response = tuned_model.generate_content(\n", + " contents=[Part.from_uri(query_image_uri, \"image/jpeg\"), task_prompt],\n", + " # Optional config\n", + " generation_config=GenerationConfig(\n", + " temperature=0.0,\n", + " ),\n", + ")\n", + "\n", + "print(response.text.strip())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "s_1-lbJZugY0" + }, + "source": [ + "- Evaluate the tuned model on entire validation set" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "B7sRtCFCUiag" + }, + "source": [ + "
\n", + "⚠️ It will take 1-2 mins for the model to generate predictions on the provided validation dataset. ⚠️\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pWxg3i3a391K" + }, + "outputs": [], + "source": [ + "%%time\n", + "predictions_tuned = run_eval(val_df, model=tuned_model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "V0wJNPEf5-6I" + }, + "outputs": [], + "source": [ + "val_df.loc[:, \"tunedPredictions\"] = predictions_tuned" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "og4hVmwCuuPW" + }, + "outputs": [], + "source": [ + "evaluation_df_post_tuning_stats = calculate_metrics(\n", + " val_df, prediction_col=\"tunedPredictions\"\n", + ")\n", + "evaluation_df_post_tuning_stats" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "heKx9Lu5vBYb" + }, + "source": [ + "- Improvement" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "X2AVUCh3S656" + }, + "outputs": [], + "source": [ + "evaluation_df_post_tuning_stats.rougeL_precision.mean()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "kTnfegPcvC-P" + }, + "outputs": [], + "source": [ + "improvement = round(\n", + " (\n", + " (\n", + " evaluation_df_post_tuning_stats.rougeL_precision.mean()\n", + " - evaluation_df_stats.rougeL_precision.mean()\n", + " )\n", + " / evaluation_df_stats.rougeL_precision.mean()\n", + " )\n", + " * 100,\n", + " 2,\n", + ")\n", + "print(\n", + " f\"Model tuning has improved the rougeL_precision by {improvement}% (result might differ based on each tuning iteration)\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qrs0o6-p6Ebr" + }, + "outputs": [], + "source": [ + "# Save predicitons\n", + "predictions_all = val_df.to_csv(\"validation_pred.csv\", index=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yUuvCQ2O-1OW" + }, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": { + 
"id": "me908QT9-26J" + }, + "source": [ + "Performance could be further improved:\n", + "- By adding more training samples. In general, improve your training data quality and/or quantity towards getting a more diverse and comprehensive dataset for your task\n", + "- By tuning the hyperparameters, such as epochs, learning rate multiplier or adapter size\n", + " - To find the optimal number of epochs for your dataset, we recommend experimenting with different values. While increasing epochs can lead to better performance, it's important to be mindful of overfitting, especially with smaller datasets. If you see signs of overfitting, reducing the number of epochs can help mitigate the issue\n", + "- You may try different prompt structures/formats and opt for the one with better performance" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F7pq-hvxvy8_" + }, + "source": [ + "## Cleaning up" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LokkxNS0vzM-" + }, + "source": [ + "To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\n", + "project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n", + "\n", + "\n", + "Otherwise, you can delete the individual resources you created in this tutorial.\n", + "\n", + "Refer to this [instructions](https://cloud.google.com/vertex-ai/docs/tutorials/image-classification-custom/cleanup#delete_resources) to delete the resources from console." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "H38EHjj3vwib" + }, + "outputs": [], + "source": [ + "# Delete Experiment.\n", + "delete_experiments = True\n", + "if delete_experiments:\n", + " experiments_list = aiplatform.Experiment.list()\n", + " for experiment in experiments_list:\n", + " if experiment.resource_name == experiment_name:\n", + " print(experiment.resource_name)\n", + " experiment.delete()\n", + " break\n", + "\n", + "print(\"***\" * 10)\n", + "\n", + "# Delete Endpoint.\n", + "delete_endpoint = True\n", + "# If force is set to True, all deployed models on this\n", + "# Endpoint will be first undeployed.\n", + "if delete_endpoint:\n", + " for endpoint in aiplatform.Endpoint.list():\n", + " if endpoint.resource_name == tuned_model_endpoint_name:\n", + " print(endpoint.resource_name)\n", + " endpoint.delete(force=True)\n", + " break\n", + "\n", + "print(\"***\" * 10)\n", + "\n", + "# Delete Cloud Storage Bucket.\n", + "delete_bucket = True\n", + "if delete_bucket:\n", + " ! 
gsutil -m rm -r $BUCKET_URI" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [ + "Dw-gQpLXTe67", + "uYAjjpdG_cpP", + "5680557f-67bd-4e8c-a383-02ab655246c5" + ], + "name": "supervised_finetuning_using_gemini_on_image_data.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb index 79cd82d819..5acb34d8fa 100644 --- a/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb +++ b/gemini/tuning/vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb @@ -127,22 +127,18 @@ "cell_type": "code", "execution_count": 25, "metadata": { - "id": "XRvKdaPDTznN", - "outputId": "30089739-607e-433e-8592-6e5b44e914c4", - "colab": { - "base_uri": "https://localhost:8080/" - } + "id": "XRvKdaPDTznN" }, "outputs": [ { - "output_type": "execute_result", "data": { "text/plain": [ "{'status': 'ok', 'restart': True}" ] }, + "execution_count": 25, "metadata": {}, - "execution_count": 25 + "output_type": "execute_result" } ], "source": [ @@ -250,50 +246,70 @@ "import dataclasses\n", "import json\n", "\n", + "from google.cloud import storage\n", "import numpy as np\n", "import tensorflow as tf\n", "from vertexai.generative_models import Content, Part\n", - "from vertexai.preview.tokenization import get_tokenizer_for_model\n", - "from google.cloud import storage" + "from vertexai.preview.tokenization import get_tokenizer_for_model" ] }, { "cell_type": "markdown", + "metadata": { + "id": "wvqIIG1M0YCy" + }, "source": [ "### Load the dataset\n", "\n", "This example is for text only. Define the Google Cloud Storage URIs pointing to your training and validation datasets or continue using the URIs provided." 
- ], - "metadata": { - "id": "wvqIIG1M0YCy" - } + ] }, { "cell_type": "code", + "execution_count": 135, + "metadata": { + "id": "oue9Q0GG0Rvk" + }, + "outputs": [], "source": [ "BASE_MODEL = \"gemini-1.5-pro-002\" # @param ['gemini-1.5-pro-002']{type:\"string\"}\n", "training_dataset_uri = \"gs://github-repo/generative-ai/gemini/tuning/train_sft_train_samples.jsonl\" # @param {type:\"string\"}\n", "validation_dataset_uri = \"gs://github-repo/generative-ai/gemini/tuning/val_sft_val_samples.jsonl\" # @param {type:\"string\"}\n", "\n", "tokenizer = get_tokenizer_for_model(\"gemini-1.5-pro-001\")" - ], - "metadata": { - "id": "oue9Q0GG0Rvk" - }, - "execution_count": 135, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "We'll now load the dataset and conduct some basic statistical analysis to understand its structure and content.\n" - ], "metadata": { "id": "dbl6UD5P3LIH" - } + }, + "source": [ + "We'll now load the dataset and conduct some basic statistical analysis to understand its structure and content.\n" + ] }, { "cell_type": "code", + "execution_count": 136, + "metadata": { + "id": "M9TrzApr1tYQ" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Num training examples: 500\n", + "First example:\n", + "{'role': 'user', 'parts': [{'text': 'Honesty is usually the best policy. It is disrespectful to lie to someone. If you don\\'t want to date someone, you should say so. Sometimes it is easy to be honest. For example, you might be able to truthfully say, \"No, thank you, I already have a date for that party.\" Other times, you might need to find a kinder way to be nice. Maybe you are not attracted to the person. Instead of bluntly saying that, try saying, \"No, thank you, I just don\\'t think we would be a good fit.\" Avoid making up a phony excuse. For instance, don\\'t tell someone you will be out of town this weekend if you won\\'t be. 
There\\'s a chance that you might then run into them at the movies, which would definitely cause hurt feelings. A compliment sandwich is a really effective way to provide feedback. Essentially, you \"sandwich\" your negative comment between two positive things. Try using this method when you need to reject someone. An example of a compliment sandwich is to say something such as, \"You\\'re an awesome person. Unfortunately, I\\'m not interested in dating you. Someone else is going to be really lucky to date someone with such a great personality!\" You could also try, \"You are a really nice person. I\\'m only interested you as a friend. I like when we hang out in big groups together!\" Be sincere. If you offer false compliments, the other person will likely be able to tell and feel hurt. If you do not want to date someone, it is best to be upfront about your feelings. Do not beat around the bush. If your mind is made up, it is best to clearly state your response. If someone asks you to date them and you don\\'t want to, you can be direct and kind at the same time. State your answer clearly. You can make your feelings clear without purposefully hurting someone else\\'s feelings. Try smiling and saying, \"That sounds fun, but no thank you. I\\'m not interested in dating you.\" Don\\'t beat around the bush. If you do not want to accept the date, there is no need to say, \"Let me think about it.\" It is best to get the rejection over with. You don\\'t want to give someone false hope. Avoid saying something like, \"Let me check my schedule and get back to you.\" Try to treat the person the way you would want to be treated. This means that you should choose your words carefully. Be thoughtful in your response. It\\'s okay to pause before responding. You might be taken by surprise and need a moment to collect your thoughts. Say thank you. It is a compliment to be asked out. You can say, \"I\\'m flattered. Unfortunately, I can\\'t accept.\" Don\\'t laugh. 
Many people laugh nervously in awkward situations. Try to avoid giggling, as that is likely to result in hurt feelings. Sometimes it is not what you say, but how you say it. If you need to reject someone, think about factors other than your words. Non-verbal communication matters, too. Use the right tone of voice. Try to sound gentle but firm. Make eye contact. This helps convey that you are being serious, and also shows respect for the other person. If you are in public, try not to speak too loudly. It is not necessary for everyone around you to know that you are turning down a date.\\n\\nProvide a summary of the article in two or three sentences:\\n\\n'}]}\n", + "CountTokensResult(total_tokens=730)\n", + "{'role': 'model', 'parts': [{'text': 'Tell the truth. Use a \"compliment sandwich\". Be direct. Treat the person with respect. Communicate effectively.'}]}\n", + "CountTokensResult(total_tokens=23)\n", + "Num validation examples: 100\n" + ] + } + ], "source": [ "example_training_dataset = []\n", "example_validation_dataset = []\n", @@ -304,9 +320,12 @@ " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", " ]\n", "except KeyError as e:\n", - " print(f\"KeyError: Please check if your file '{training_dataset_uri}' is a JSONL file with correct JSON format. Error: {e}\")\n", + " print(\n", + " f\"KeyError: Please check if your file '{training_dataset_uri}' is a JSONL file with correct JSON format. Error: {e}\"\n", + " )\n", " # Exit the script if there's an error in the training data\n", " import sys\n", + "\n", " sys.exit(1)\n", "\n", "print()\n", @@ -318,9 +337,12 @@ " json.loads(dataset_line) for dataset_line in dataset_jsonl_file\n", " ]\n", " except KeyError as e:\n", - " print(f\"KeyError: Please check if your file '{validation_dataset_uri}' is a JSONL file with correct JSON format. Error: {e}\")\n", + " print(\n", + " f\"KeyError: Please check if your file '{validation_dataset_uri}' is a JSONL file with correct JSON format. 
Error: {e}\"\n", + " )\n", " # Exit the script if there's an error in the validation data\n", " import sys\n", + "\n", " sys.exit(1)\n", "\n", "# Initial dataset stats\n", @@ -334,34 +356,13 @@ "\n", "if example_validation_dataset:\n", " print(\"Num validation examples:\", len(example_validation_dataset))" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "M9TrzApr1tYQ", - "outputId": "2e19860a-0bf6-446c-8bcc-e262c3c3833c" - }, - "execution_count": 136, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "Num training examples: 500\n", - "First example:\n", - "{'role': 'user', 'parts': [{'text': 'Honesty is usually the best policy. It is disrespectful to lie to someone. If you don\\'t want to date someone, you should say so. Sometimes it is easy to be honest. For example, you might be able to truthfully say, \"No, thank you, I already have a date for that party.\" Other times, you might need to find a kinder way to be nice. Maybe you are not attracted to the person. Instead of bluntly saying that, try saying, \"No, thank you, I just don\\'t think we would be a good fit.\" Avoid making up a phony excuse. For instance, don\\'t tell someone you will be out of town this weekend if you won\\'t be. There\\'s a chance that you might then run into them at the movies, which would definitely cause hurt feelings. A compliment sandwich is a really effective way to provide feedback. Essentially, you \"sandwich\" your negative comment between two positive things. Try using this method when you need to reject someone. An example of a compliment sandwich is to say something such as, \"You\\'re an awesome person. Unfortunately, I\\'m not interested in dating you. Someone else is going to be really lucky to date someone with such a great personality!\" You could also try, \"You are a really nice person. I\\'m only interested you as a friend. I like when we hang out in big groups together!\" Be sincere. 
If you offer false compliments, the other person will likely be able to tell and feel hurt. If you do not want to date someone, it is best to be upfront about your feelings. Do not beat around the bush. If your mind is made up, it is best to clearly state your response. If someone asks you to date them and you don\\'t want to, you can be direct and kind at the same time. State your answer clearly. You can make your feelings clear without purposefully hurting someone else\\'s feelings. Try smiling and saying, \"That sounds fun, but no thank you. I\\'m not interested in dating you.\" Don\\'t beat around the bush. If you do not want to accept the date, there is no need to say, \"Let me think about it.\" It is best to get the rejection over with. You don\\'t want to give someone false hope. Avoid saying something like, \"Let me check my schedule and get back to you.\" Try to treat the person the way you would want to be treated. This means that you should choose your words carefully. Be thoughtful in your response. It\\'s okay to pause before responding. You might be taken by surprise and need a moment to collect your thoughts. Say thank you. It is a compliment to be asked out. You can say, \"I\\'m flattered. Unfortunately, I can\\'t accept.\" Don\\'t laugh. Many people laugh nervously in awkward situations. Try to avoid giggling, as that is likely to result in hurt feelings. Sometimes it is not what you say, but how you say it. If you need to reject someone, think about factors other than your words. Non-verbal communication matters, too. Use the right tone of voice. Try to sound gentle but firm. Make eye contact. This helps convey that you are being serious, and also shows respect for the other person. If you are in public, try not to speak too loudly. 
It is not necessary for everyone around you to know that you are turning down a date.\\n\\nProvide a summary of the article in two or three sentences:\\n\\n'}]}\n", - "CountTokensResult(total_tokens=730)\n", - "{'role': 'model', 'parts': [{'text': 'Tell the truth. Use a \"compliment sandwich\". Be direct. Treat the person with respect. Communicate effectively.'}]}\n", - "CountTokensResult(total_tokens=23)\n", - "Num validation examples: 100\n" - ] - } ] }, { "cell_type": "markdown", + "metadata": { + "id": "L5RhrH6r4NrC" + }, "source": [ "You can perform various error checks to validate that each tuning example in the dataset adheres to the format expected by the tuning API. Errors are categorized based on their nature for easier debugging. \n", " \n", @@ -375,20 +376,20 @@ "6. **Parts List Validation:** Verifies that the `parts` key contains a list. Error type: `missing_or_invalid_parts`.\n", "7. **Part Format:** Checks if each part in the `parts` list is a dictionary and contains the key `text`. Error type: `invalid_part`.\n", "8. **Text Validation:** Ensures that the `text` key has textual data and is a string. Error type: `missing_text`.\n", - "9. **Consecutive Turns:** For the chat history, it is enforced that the message roles alternate (user, then model, then user, etc.). Error type: `consecutive_turns`. This check is not applicable for systemInstruction.\n", - "\n", - "\n", - "\n" - ], - "metadata": { - "id": "L5RhrH6r4NrC" - } + "9. **Consecutive Turns:** For the chat history, it is enforced that the message roles alternate (user, then model, then user, etc.). Error type: `consecutive_turns`. 
This check is not applicable for systemInstruction.\n" + ] }, { "cell_type": "code", + "execution_count": 137, + "metadata": { + "id": "S2FVNbIX0R0n" + }, + "outputs": [], "source": [ "from collections import defaultdict\n", "\n", + "\n", "def validate_dataset_format(dataset):\n", " \"\"\"Validates the dataset.\n", "\n", @@ -413,7 +414,12 @@ " if system_instruction:\n", " try:\n", " # Validate the list within \"parts\"\n", - " validate_contents(system_instruction.get(\"parts\", []), format_errors, row_idx, is_system_instruction=True)\n", + " validate_contents(\n", + " system_instruction.get(\"parts\", []),\n", + " format_errors,\n", + " row_idx,\n", + " is_system_instruction=True,\n", + " )\n", " except (TypeError, AttributeError, KeyError) as e:\n", " print(\"Invalid input during system instruction validation: %s\", e)\n", " format_errors[\"invalid_system_instruction\"].append(row_idx)\n", @@ -456,37 +462,25 @@ " return\n", "\n", " # Skip key checks for system instructions\n", - " if not is_system_instruction and (\"role\" not in content_item or \"parts\" not in content_item):\n", + " if not is_system_instruction and (\n", + " \"role\" not in content_item or \"parts\" not in content_item\n", + " ):\n", " format_errors[\"content_item_missing_key\"].append(row_index)\n", " return\n", "\n", " # ... 
(rest of the validation logic remains the same)" - ], - "metadata": { - "id": "S2FVNbIX0R0n" - }, - "execution_count": 137, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "validate_dataset_format(example_training_dataset)\n", - "if example_validation_dataset:\n", - " validate_dataset_format(example_validation_dataset)" - ], + "execution_count": 138, "metadata": { - "id": "nSLnbeJ00R2v", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "8f680322-45cc-4047-e76d-f7c3932d0439" + "id": "nSLnbeJ00R2v" }, - "execution_count": 138, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "No errors found for this dataset.\n", "Warning: systemInstruction is missing in some rows.\n", @@ -494,10 +488,18 @@ "Warning: systemInstruction is missing in some rows.\n" ] } + ], + "source": [ + "validate_dataset_format(example_training_dataset)\n", + "if example_validation_dataset:\n", + " validate_dataset_format(example_validation_dataset)" ] }, { "cell_type": "markdown", + "metadata": { + "id": "SJFwhPid_7oG" + }, "source": [ "### Utils for dataset analysis and token counting\n", "\n", @@ -506,13 +508,15 @@ "* Load and inspect sample data from the training and validation datasets.\n", "* Calculate token counts for messages to understand the dataset's characteristics.\n", "* Define utility functions for calculating token distributions and dataset statistics. These will help assess the suitability of your data for supervised tuning and estimate potential costs." 
- ], - "metadata": { - "id": "SJFwhPid_7oG" - } + ] }, { "cell_type": "code", + "execution_count": 140, + "metadata": { + "id": "al_uUWOP4Ss2" + }, + "outputs": [], "source": [ "@dataclasses.dataclass\n", "class DatasetDistribution:\n", @@ -564,15 +568,15 @@ "\n", "MAX_TOKENS_PER_EXAMPLE = 32 * 1024\n", "ESTIMATE_PADDING_TOKEN_PER_EXAMPLE = 8" - ], - "metadata": { - "id": "al_uUWOP4Ss2" - }, - "execution_count": 140, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 141, + "metadata": { + "id": "21xgvR3g0R5P" + }, + "outputs": [], "source": [ "def calculate_distribution_for_population(population) -> DatasetDistribution:\n", " \"\"\"Calculates the distribution from the population of values.\n", @@ -607,12 +611,16 @@ " # Handle optional systemInstruction\n", " system_instruction = example.get(\"systemInstruction\")\n", " if system_instruction:\n", - " text = system_instruction.get(\"parts\")[0].get(\"text\") # Assuming single part in system instruction\n", + " text = system_instruction.get(\"parts\")[0].get(\n", + " \"text\"\n", + " ) # Assuming single part in system instruction\n", " input.append(Content(role=\"system\", parts=[Part.from_text(text)]))\n", "\n", " for content_item in example[\"contents\"]:\n", " role = content_item.get(\"role\").lower()\n", - " text = content_item.get(\"parts\")[0].get(\"text\") # Assuming single part in content item\n", + " text = content_item.get(\"parts\")[0].get(\n", + " \"text\"\n", + " ) # Assuming single part in content item\n", "\n", " if role.lower() == \"model\":\n", " result = tokenizer.count_tokens(input)\n", @@ -674,6 +682,7 @@ " ),\n", " )\n", "\n", + "\n", "def print_dataset_stats(dataset):\n", " dataset_stats = get_dataset_stats_for_dataset(dataset)\n", " print(\"Below you can find the dataset statistics:\")\n", @@ -693,42 +702,27 @@ " f\"User output token length distribution: {dataset_stats.user_output_token_length_stats}\"\n", " )\n", " return dataset_stats" - ], - "metadata": { - "id": 
"21xgvR3g0R5P" - }, - "execution_count": 141, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Next you can analyze the structure and token counts of your datasets." - ], "metadata": { "id": "-FF4ReY6Atw3" - } + }, + "source": [ + "Next you can analyze the structure and token counts of your datasets." + ] }, { "cell_type": "code", - "source": [ - "training_dataset_stats = print_dataset_stats(example_training_dataset)\n", - "\n", - "if example_validation_dataset:\n", - " validation_dataset_stats = print_dataset_stats(example_validation_dataset)" - ], + "execution_count": 142, "metadata": { - "id": "sZqsWno60R7O", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "8b78aea8-fbfa-4f48-9ab0-03bdca05f9f9" + "id": "sZqsWno60R7O" }, - "execution_count": 142, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "\n", "0 examples may be over the 32768 token limit, they will be truncated during tuning.\n", @@ -748,10 +742,19 @@ "User output token length distribution: DatasetDistribution(sum=3819, max=165, min=8, mean=38.19, median=32.0, p5=17, p95=76)\n" ] } + ], + "source": [ + "training_dataset_stats = print_dataset_stats(example_training_dataset)\n", + "\n", + "if example_validation_dataset:\n", + " validation_dataset_stats = print_dataset_stats(example_validation_dataset)" ] }, { "cell_type": "markdown", + "metadata": { + "id": "KFWbXu17DfiS" + }, "source": [ "### Cost Estimation for Supervised Fine-tuning\n", "In this final section, you will estimate the total cost for supervised fine-tuning based on the number of tokens processed. The number of tokens used will be charged to you. 
Please refer to the [pricing page for the rate](https://cloud.google.com/vertex-ai/generative-ai/pricing#gemini-models).\n", @@ -759,13 +762,25 @@ "**Important Note:** The final cost may vary slightly from this estimate due to dataset formatting and truncation logic during training.\n", "\n", "The code calculates the total number of billable tokens by summing up the tokens from the training dataset and (if provided) the validation dataset. Then, it estimates the total cost by multiplying the total billable tokens with the number of training epochs (default is 4)." - ], - "metadata": { - "id": "KFWbXu17DfiS" - } + ] }, { "cell_type": "code", + "execution_count": 143, + "metadata": { + "id": "k3ZJ_8fQ0R9x" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Dataset has ~309397 tokens that will be charged\n", + "By default, you'll train for 4 epochs on this dataset.\n", + "By default, you'll be charged for ~1237588 tokens.\n" + ] + } + ], "source": [ "epoch_count = 4 # @param {type:\"integer\"}\n", "if epoch_count is None:\n", @@ -785,51 +800,41 @@ "print(\n", " f\"By default, you'll be charged for ~{epoch_count * total_number_of_billable_tokens} tokens.\"\n", ")" - ], - "metadata": { - "id": "k3ZJ_8fQ0R9x", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "9525fd37-0fc4-44dc-a97e-99a9e44748c0" - }, - "execution_count": 143, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Dataset has ~309397 tokens that will be charged\n", - "By default, you'll train for 4 epochs on this dataset.\n", - "By default, you'll be charged for ~1237588 tokens.\n" - ] - } ] }, { "cell_type": "markdown", - "source": [ - "## Convert `Gemini 1.0 Pro` fine-tuning dataset to `Gemini 1.5 Pro` dataset." - ], "metadata": { "id": "K1EMMeRfH14a" - } + }, + "source": [ + "## Convert `Gemini 1.0 Pro` fine-tuning dataset to `Gemini 1.5 Pro` dataset." 
+ ] }, { "cell_type": "code", - "source": [ - "source_uri = \"gs://next-23-tuning-demo/example-fine-tuning.json\" # @param {type:\"string\"}\n", - "destination_uri = \"gs://next-23-tuning-demo/new-data-format.jsonl\" # @param {type:\"string\"}\n", - "system_instruction = \"You are a helpful and friendly AI assistant\" # Optional" - ], + "execution_count": 144, "metadata": { "id": "oOKZgdSLJUFx" }, - "execution_count": 144, - "outputs": [] + "outputs": [], + "source": [ + "source_uri = (\n", + " \"gs://next-23-tuning-demo/example-fine-tuning.json\" # @param {type:\"string\"}\n", + ")\n", + "destination_uri = (\n", + " \"gs://next-23-tuning-demo/new-data-format.jsonl\" # @param {type:\"string\"}\n", + ")\n", + "system_instruction = \"You are a helpful and friendly AI assistant\" # Optional" + ] }, { "cell_type": "code", + "execution_count": 117, + "metadata": { + "id": "fgNjg3Y4CSq8" + }, + "outputs": [], "source": [ "def convert_jsonl_format(\n", " source_uri: str,\n", @@ -857,7 +862,7 @@ " dest_blob = dest_bucket.blob(dest_blob_name)\n", "\n", " # Download the source JSONL file\n", - " source_data = source_blob.download_as_string().decode('utf-8')\n", + " source_data = source_blob.download_as_string().decode(\"utf-8\")\n", "\n", " new_data = []\n", " for line in source_data.splitlines():\n", @@ -890,16 +895,13 @@ " if system_instruction:\n", " new_json_data[\"systemInstruction\"] = {\n", " \"role\": \"system\",\n", - " \"parts\": [{\"text\": system_instruction}]\n", + " \"parts\": [{\"text\": system_instruction}],\n", " }\n", "\n", " new_json_data[\"contents\"] = [] # Initialize \"contents\" after \"systemInstruction\"\n", "\n", " for message in json_data.get(\"messages\", []):\n", - " new_message = {\n", - " \"role\": message[\"role\"],\n", - " \"parts\": [{\"text\": message[\"content\"]}]\n", - " }\n", + " new_message = {\"role\": message[\"role\"], \"parts\": [{\"text\": message[\"content\"]}]}\n", " new_json_data[\"contents\"].append(new_message)\n", "\n", " 
return new_json_data\n", @@ -918,46 +920,37 @@ " raise ValueError(\"Invalid Google Cloud Storage URI\")\n", " parts = gcs_uri[5:].split(\"/\", 1)\n", " return parts[0], parts[1]" - ], - "metadata": { - "id": "fgNjg3Y4CSq8" - }, - "execution_count": 117, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "convert_jsonl_format(source_uri, destination_uri, system_instruction)" - ], + "execution_count": 118, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "WAqrR4yDH1LT", - "outputId": "2e351e1c-ee6c-40e6-857f-581b12fe3872" + "id": "WAqrR4yDH1LT" }, - "execution_count": 118, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Successfully converted and uploaded to gs://next-23-tuning-demo/new-data-format.jsonl\n" ] } + ], + "source": [ + "convert_jsonl_format(source_uri, destination_uri, system_instruction)" ] }, { "cell_type": "markdown", + "metadata": { + "id": "9k1GJaFIEvd-" + }, "source": [ "## Tuning token count and cost estimation for `Gemini 1.0 pro` legacy users.\n", "\n", "Only use this part if you still use `Gemini 1.0 pro`. Its best to upgrade to using [`gemini-1.5-pro-002`](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning)." 
- ], - "metadata": { - "id": "9k1GJaFIEvd-" - } + ] }, { "cell_type": "markdown", @@ -998,8 +991,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "PTvunHqRTHqe", - "outputId": "8d1aabc9-cf3b-4150-f768-c40d0d92c237" + "id": "PTvunHqRTHqe" }, "outputs": [ { @@ -1154,8 +1146,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "pUCpEmEFM0eX", - "outputId": "1bf39ccb-4898-4c44-9a6e-557e58694d7a" + "id": "pUCpEmEFM0eX" }, "outputs": [ { @@ -1384,8 +1375,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "uOWsUbwVXoTU", - "outputId": "c644fa82-1de4-4ba5-f9cf-44f4232917ee" + "id": "uOWsUbwVXoTU" }, "outputs": [ { @@ -1449,8 +1439,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "DVIpbaGYRJQc", - "outputId": "2e2f78cc-2005-4965-af26-a1cc5627e7ee" + "id": "DVIpbaGYRJQc" }, "outputs": [ { @@ -1492,8 +1481,8 @@ "dmWOrTJ3gx13", "DF4l8DTdWgPY" ], - "toc_visible": true, - "provenance": [] + "name": "vertexai_supervised_tuning_token_count_and_cost_estimation.ipynb", + "toc_visible": true }, "kernelspec": { "display_name": "Python 3", @@ -1502,4 +1491,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From d0998bd22b16f1296e927c4860fba9e30e27a289 Mon Sep 17 00:00:00 2001 From: Riccardo Carlesso Date: Wed, 2 Oct 2024 17:37:25 +0200 Subject: [PATCH 33/76] fix: Update compare_generative_ai_models.ipynb (#1202) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding `plotly` as the notebook fails otherwise. # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ ] You are listed as the author in your notebook or README file. 
- [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [ ] Make your Pull Request title in the specification. - [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --- gemini/evaluation/compare_generative_ai_models.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gemini/evaluation/compare_generative_ai_models.ipynb b/gemini/evaluation/compare_generative_ai_models.ipynb index 995b75207a..524332bfd5 100644 --- a/gemini/evaluation/compare_generative_ai_models.ipynb +++ b/gemini/evaluation/compare_generative_ai_models.ipynb @@ -103,7 +103,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --user --quiet google-cloud-aiplatform[evaluation]" + "%pip install --upgrade --user --quiet google-cloud-aiplatform[evaluation] plotly" ] }, { From 800d05b4a3a28fe23b971e702e76df0c41e6d308 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Wed, 2 Oct 2024 11:44:49 -0500 Subject: [PATCH 34/76] ci: Updates to nox formatter (#1203) - Changed `nox -s format` to run nbqa formatters and allowlist sort - Cleaned up Contributing guide instructions - Fixed lint/spelling errors --------- Co-authored-by: Owl Bot --- .github/actions/spelling/allow.txt | 17 +++++++ .github/actions/spelling/excludes.txt | 1 - .github/workflows/linter.yaml | 1 + CONTRIBUTING.md | 36 ++----------- .../src/champion_challenger_pipeline.py | 2 +- .../src/submit_pipeline.py | 2 +- .../finance-advisor-spanner/database.py | 2 +- .../backend/indexing/run_parse_embed_index.py | 2 +- .../backend/indexing/vector_search_utils.py | 1 + .../backend/rag/async_extensions.py | 1 + .../backend/rag/claude_vertex.py | 1 + .../llamaindex-rag/backend/rag/evaluate.py | 1 + .../backend/rag/node_reranker.py | 1 + .../backend/rag/parent_retriever.py | 1 + 
.../llamaindex-rag/backend/rag/prompts.py | 1 + .../backend/rag/qa_followup_retriever.py | 1 + .../NLP2SQL_using_dynamic_RAG.ipynb | 4 +- .../small_to_big_rag/small_to_big_rag.ipynb | 13 +++-- ...nslation_training_data_tsv_generator.ipynb | 6 +-- noxfile.py | 51 ++++++++++--------- owlbot.py | 11 ---- search/web-app/consts.py | 10 ++-- 22 files changed, 76 insertions(+), 90 deletions(-) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 13d6ed4657..8fc2c985e3 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -139,6 +139,7 @@ LUVBPTK Ladhak Lego Llion +Logrus Lottry MSCHF MSGSEND @@ -158,6 +159,7 @@ Mvar NARI NCCREATE NDEBUG +NGRAM NGRAMS NMT NOMINMAX @@ -213,6 +215,7 @@ Shazeer Shenzhou Simpsons Siri +Skaffold Sketchfab Smartbuy Storrer @@ -287,11 +290,13 @@ arXiv aretrieve argmax arun +arxiv astype autoflake autogen automl autoptr +autorater autosxs backticks bagchi @@ -299,6 +304,7 @@ barmode barpolar baxis bbc +bigframes bigquery bitcoin boundings @@ -310,11 +316,13 @@ caxis cctv cfbundle chatbots +chromadb claude clickable cmap codebase codebases +codefile codelab codelabs colab @@ -357,6 +365,7 @@ ekg elous emb embs +embvs emojis ename epoc @@ -370,6 +379,7 @@ fewshot ffi figsize fillmode +fillna firestore flac floormat @@ -385,6 +395,7 @@ fulltext funtion gapic gboolean +gbq gchar gcloud gcs @@ -406,7 +417,9 @@ goooooood gpt gpu gradio +gradlew gridcolor +grpcio gspread gsutil gtk @@ -463,6 +476,7 @@ lexer linalg linecolor linted +linting llm llms logprobs @@ -596,6 +610,7 @@ tabular tagline tencel termcolor +terraform textno tfhub tfidf @@ -608,6 +623,7 @@ tobytes toself tqdm tritan +tsv ubuntu undst unigram @@ -645,6 +661,7 @@ xcscheme xctest xlabel xticks +xxxxxxxx yaxes yaxis ylabel diff --git a/.github/actions/spelling/excludes.txt b/.github/actions/spelling/excludes.txt index b551f43871..74ed9f32e1 100644 --- a/.github/actions/spelling/excludes.txt +++ 
b/.github/actions/spelling/excludes.txt @@ -108,4 +108,3 @@ ignore$ ^\Qsearch/bulk-question-answering/bulk_question_answering_output.tsv\E$ ^\Q.github/workflows/issue_assigner/assign_issue.py\E$ ^\Qnoxfile.py\E$ -^\owlbot.py\E$ diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml index 11b68f1345..e9e1139ec1 100644 --- a/.github/workflows/linter.yaml +++ b/.github/workflows/linter.yaml @@ -57,6 +57,7 @@ jobs: LOG_LEVEL: WARN SHELLCHECK_OPTS: -e SC1091 -e 2086 VALIDATE_ALL_CODEBASE: false + VALIDATE_PYTHON_PYINK: false VALIDATE_PYTHON_ISORT: false VALIDATE_TYPESCRIPT_STANDARD: false # super-linter/super-linter#4445 VALIDATE_CHECKOV: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fd7687a10f..fa992d0654 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ again. ## Notebook Template -If you're creating a Jupyter Notebook, use `/gemini/getting-started/intro_gemini_python.ipynb` as a template. +If you're creating a Jupyter Notebook, use [`notebook_template.ipynb`](notebook_template.ipynb) as a template. ## Code Quality Checks @@ -25,36 +25,11 @@ All notebooks in this project are checked for formatting and style, to ensure a consistent experience. To test notebooks prior to submitting a pull request, you can follow these steps. -From a command-line terminal (e.g. from Vertex Workbench or locally), install -the code analysis tools: - -```shell -pip3 install --user -U nbqa black flake8 isort pyupgrade git+https://github.com/tensorflow/docs -``` - -You'll likely need to add the directory where these were installed to your PATH: - -```shell -export PATH="$HOME/.local/bin:$PATH" -``` - -Then, set an environment variable for your notebook (or directory): - -```shell -export notebook="your-notebook.ipynb" -``` - -Finally, run this code block to check for errors. Each step will attempt to -automatically fix any issues. If the fixes can't be performed automatically, +From a command-line terminal (e.g. 
from Vertex AI Workbench or locally), +run this code block to format your code. +If the fixes can't be performed automatically, then you will need to manually address them before submitting your PR. -Note: For official, only submit one notebook per PR. - -```shell -python3 -m pip install -U -r .github/workflows/notebook_linter/requirements.txt -.github/workflows/notebook_linter/run_linter.sh -``` - ```shell python3 -m pip install --upgrade nox nox -s format @@ -69,8 +44,7 @@ information on using pull requests. ## Community Guidelines -This project follows [Google's Open Source Community -Guidelines](https://opensource.google/conduct/). +This project follows [Google's Open Source Community Guidelines](https://opensource.google/conduct/). ## Contributor Guide diff --git a/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/champion_challenger_pipeline.py b/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/champion_challenger_pipeline.py index 477b4aaee0..fa2cf13d5d 100644 --- a/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/champion_challenger_pipeline.py +++ b/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/champion_challenger_pipeline.py @@ -19,7 +19,7 @@ # mypy: disable-error-code="no-untyped-def, valid-type, no-untyped-def, assignment" -""" Champion Challenger Auto Side-by-side Evaluation Vertex AI Pipelines """ +"""Champion Challenger Auto Side-by-side Evaluation Vertex AI Pipelines""" from typing import NamedTuple diff --git a/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/submit_pipeline.py b/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/submit_pipeline.py index 5fcf069b0b..4bae460e7b 100644 --- a/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/submit_pipeline.py +++ b/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/submit_pipeline.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the 
License. -""" Submit Vertex AI Pipeline run""" +"""Submit Vertex AI Pipeline run""" from google.cloud import aiplatform diff --git a/gemini/sample-apps/finance-advisor-spanner/database.py b/gemini/sample-apps/finance-advisor-spanner/database.py index da7a3f24aa..0b4b406019 100644 --- a/gemini/sample-apps/finance-advisor-spanner/database.py +++ b/gemini/sample-apps/finance-advisor-spanner/database.py @@ -1,4 +1,4 @@ -"""This file is for database operations done by the application """ +"""This file is for database operations done by the application""" # pylint: disable=line-too-long import os diff --git a/gemini/sample-apps/llamaindex-rag/backend/indexing/run_parse_embed_index.py b/gemini/sample-apps/llamaindex-rag/backend/indexing/run_parse_embed_index.py index af343adce4..96e69acc7e 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/indexing/run_parse_embed_index.py +++ b/gemini/sample-apps/llamaindex-rag/backend/indexing/run_parse_embed_index.py @@ -1,4 +1,4 @@ -"""Master script for parsing, embedding +"""Master script for parsing, embedding and indexing data living in a GCS bucket""" import asyncio diff --git a/gemini/sample-apps/llamaindex-rag/backend/indexing/vector_search_utils.py b/gemini/sample-apps/llamaindex-rag/backend/indexing/vector_search_utils.py index 03ac5cf727..f0df78c3df 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/indexing/vector_search_utils.py +++ b/gemini/sample-apps/llamaindex-rag/backend/indexing/vector_search_utils.py @@ -1,4 +1,5 @@ """Module for vector search utils.""" + from google.cloud import aiplatform diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/async_extensions.py b/gemini/sample-apps/llamaindex-rag/backend/rag/async_extensions.py index 981d7dc4d8..a539e1fc7e 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/rag/async_extensions.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/async_extensions.py @@ -1,4 +1,5 @@ """Extensions to Llamaindex Base classes to allow for asynchronous execution""" + 
from collections.abc import Sequence import logging diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/claude_vertex.py b/gemini/sample-apps/llamaindex-rag/backend/rag/claude_vertex.py index 2ce3bdea67..4610f226fc 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/rag/claude_vertex.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/claude_vertex.py @@ -1,4 +1,5 @@ """Llamaindex LLM implementation of Claude Vertex AI""" + from typing import Any from anthropic import AnthropicVertex, AsyncAnthropicVertex diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/evaluate.py b/gemini/sample-apps/llamaindex-rag/backend/rag/evaluate.py index 15fe6ff352..ad794ee9dc 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/rag/evaluate.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/evaluate.py @@ -1,4 +1,5 @@ """Custom LLM Evaluator""" + import asyncio from collections.abc import Callable import logging diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/node_reranker.py b/gemini/sample-apps/llamaindex-rag/backend/rag/node_reranker.py index 89d5354b1b..9f5204790b 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/rag/node_reranker.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/node_reranker.py @@ -1,4 +1,5 @@ """Node Re-ranker class for async execution""" + from collections.abc import Callable import logging diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/parent_retriever.py b/gemini/sample-apps/llamaindex-rag/backend/rag/parent_retriever.py index b4d130b61c..25db908251 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/rag/parent_retriever.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/parent_retriever.py @@ -1,4 +1,5 @@ """Custom retriever which implements parent retrieval""" + import logging from llama_index.core import QueryBundle diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/prompts.py b/gemini/sample-apps/llamaindex-rag/backend/rag/prompts.py index f53033d806..0473331119 100644 --- 
a/gemini/sample-apps/llamaindex-rag/backend/rag/prompts.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/prompts.py @@ -1,4 +1,5 @@ """Prompt management class""" + from dataclasses import asdict, dataclass, field SYSTEM_PROMPT = "You are an expert assistant specializing in \ diff --git a/gemini/sample-apps/llamaindex-rag/backend/rag/qa_followup_retriever.py b/gemini/sample-apps/llamaindex-rag/backend/rag/qa_followup_retriever.py index c23ca8d4f9..1a0ef40f83 100644 --- a/gemini/sample-apps/llamaindex-rag/backend/rag/qa_followup_retriever.py +++ b/gemini/sample-apps/llamaindex-rag/backend/rag/qa_followup_retriever.py @@ -1,5 +1,6 @@ """Custom retriever which implements retrieval based on hypothetical questions""" + import logging from llama_index.core import QueryBundle diff --git a/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb b/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb index 0efe670b24..b633815e87 100644 --- a/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb +++ b/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb @@ -1196,9 +1196,7 @@ } ], "source": [ - "text_query = (\n", - " \"展示Foreign Currency Transactions 2023年10月的信息\" # @param {type:\"string\"}\n", - ")\n", + "text_query = \"展示Foreign Currency Transactions 2023年10月的信息\" # @param {type:\"string\"}\n", "find_similar_questions(df, text_query)" ] }, diff --git a/gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb b/gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb index be381a2c49..0b595ee9af 100644 --- a/gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb +++ b/gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb @@ -551,7 +551,7 @@ "source": [ "### Convert summaries to embeddings\n", "\n", - "Next, we'll convert the text summaries 
of each code file to vector embeddings. We'll store thsoe embeddings in an in-memory Chroma database. " + "Next, we'll convert the text summaries of each code file to vector embeddings. We'll store those embeddings in an in-memory Chroma database. " ] }, { @@ -588,14 +588,14 @@ "for index, row in df.iterrows():\n", " fn = row[\"filename\"]\n", " print(\"Getting embedding for: \", fn)\n", - " summ = row[\"summary\"]\n", - " print(summ)\n", - " e = get_text_embedding(summ)\n", + " summary = row[\"summary\"]\n", + " print(summary)\n", + " e = get_text_embedding(summary)\n", " print(e)\n", " # add vector embedding to in-memory Chroma database.\n", " # the \"small\" summary embedding is linked to the \"big\" raw code file through the metadata key, \"filename.\"\n", " collection.add(\n", - " embeddings=[e], documents=[summ], metadatas=[{\"filename\": fn}], ids=[fn]\n", + " embeddings=[e], documents=[summary], metadatas=[{\"filename\": fn}], ids=[fn]\n", " )" ] }, @@ -630,7 +630,7 @@ "id": "51cf5003f530" }, "source": [ - "The function below shows how we'll first try to inference Gemini wth small chunks (code file summaries). If Gemini can answer with that context, we return its response and we're done. If Gemini needs more context, we'll ask it what file it would like to see. Then, we'll directly retrieve the code file from the DataFrame, and pass it into Gemini again as the \"large\" context." + "The function below shows how we'll first try to inference Gemini with small chunks (code file summaries). If Gemini can answer with that context, we return its response and we're done. If Gemini needs more context, we'll ask it what file it would like to see. Then, we'll directly retrieve the code file from the DataFrame, and pass it into Gemini again as the \"large\" context." 
] }, { @@ -826,7 +826,6 @@ " resource.null_resource.apply_deployment\n", " ]\n", "}\n", - "\n", "\"\"\"" ] }, diff --git a/language/translation/translation_training_data_tsv_generator.ipynb b/language/translation/translation_training_data_tsv_generator.ipynb index 4564732913..edd05dab7f 100644 --- a/language/translation/translation_training_data_tsv_generator.ipynb +++ b/language/translation/translation_training_data_tsv_generator.ipynb @@ -435,9 +435,9 @@ " \"Length of a pair detected to be greater than 200 words.\"\n", " )\n", " print(\"this pair will be skipped\")\n", - " more_than_200_words[\" \".join(src_row_data)] = (\n", - " \" \".join(ref_row_data)\n", - " )\n", + " more_than_200_words[\n", + " \" \".join(src_row_data)\n", + " ] = \" \".join(ref_row_data)\n", " else:\n", " tsv_f.write(\n", " \" \".join(src_row_data)\n", diff --git a/noxfile.py b/noxfile.py index 1ef53e1cea..30eecac2fc 100644 --- a/noxfile.py +++ b/noxfile.py @@ -110,9 +110,21 @@ def format(session): Run isort to sort imports. Then run black to format code to uniform standard. """ - session.install(BLACK_VERSION, ISORT_VERSION, "autoflake", "ruff") - # Use the --fss option to sort imports using strict alphabetical order. - # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.install( + "git+https://github.com/tensorflow/docs", + "ipython", + "jupyter", + "nbconvert", + "types-requests", + BLACK_VERSION, + "blacken-docs", + "pyupgrade", + ISORT_VERSION, + "nbqa", + "autoflake", + "nbformat", + "ruff", + ) session.run( "autoflake", "-i", @@ -126,6 +138,8 @@ def format(session): "--fix-only", *LINT_PATHS, ) + # Use the --fss option to sort imports using strict alphabetical order. 
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections session.run( "isort", "--fss", @@ -135,28 +149,6 @@ def format(session): "black", *LINT_PATHS, ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def format_notebooks(session): - """ - Run isort to sort imports. Then run black - to format code to uniform standard. - """ - session.install( - "git+https://github.com/tensorflow/docs", - "ipython", - "jupyter", - "nbconvert", - "types-requests", - "black", - "blacken-docs", - "pyupgrade", - "isort", - "nbqa", - "autoflake", - "nbformat", - ) session.run("python3", ".github/workflows/update_notebook_links.py", ".") session.run( "nbqa", "pyupgrade", "--exit-zero-even-if-changed", "--py310-plus", *LINT_PATHS @@ -176,6 +168,15 @@ def format_notebooks(session): session.run("nbqa", "blacken-docs", "--nbqa-md", *LINT_PATHS) session.run("python3", "-m", "tensorflow_docs.tools.nbfmt", *LINT_PATHS) + # Sort Spelling Allowlist + spelling_allow_file = ".github/actions/spelling/allow.txt" + + with open(spelling_allow_file, encoding="utf-8") as file: + unique_words = sorted(set(file)) + + with open(spelling_allow_file, "w", encoding="utf-8") as file: + file.writelines(unique_words) + def install_unittest_dependencies(session, *constraints): standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES diff --git a/owlbot.py b/owlbot.py index 9f1ff224bb..8b9c29594d 100644 --- a/owlbot.py +++ b/owlbot.py @@ -33,14 +33,3 @@ # ---------------------------------------------------------------------------- s.shell.run(["nox", "-s", "format"], hide_output=False) - -s.shell.run(["nox", "-s", "format_notebooks"], hide_output=False) - -# Sort Spelling Allowlist -spelling_allow_file = ".github/actions/spelling/allow.txt" - -with open(spelling_allow_file, encoding="utf-8") as file: - unique_words = sorted(set(file)) - -with open(spelling_allow_file, "w", encoding="utf-8") as file: - file.writelines(unique_words) diff --git 
a/search/web-app/consts.py b/search/web-app/consts.py index 7a2145d620..c30c91308e 100644 --- a/search/web-app/consts.py +++ b/search/web-app/consts.py @@ -33,11 +33,11 @@ CUSTOM_UI_DATASTORE_IDS = [ { "name": "Google Cloud Website", - "engine_id": "google-cloud-site-search_xxxxxxxx", + "engine_id": "google-cloud-site-search", }, { "name": "Google Merchandise Store (Advanced Indexing)", - "engine_id": "google-merch-store_xxxxxxxx", + "engine_id": "google-merch-store", }, ] @@ -45,15 +45,15 @@ IMAGE_SEARCH_DATASTORE_IDs = [ { "name": "Google Merchandise Store", - "engine_id": "google-merch-store_xxxxxxx", + "engine_id": "google-merch-store", } ] RECOMMENDATIONS_DATASTORE_IDs = [ { "name": "arXiv Natural Language Papers", - "datastore_id": "arxiv_xxxxxxxxxx", - "engine_id": "arxiv-personalize_xxxxxxxx", + "datastore_id": "arxiv", + "engine_id": "arxiv-personalize", } ] From b35e6dc13da3108ca082302c8d0319f26c3bc451 Mon Sep 17 00:00:00 2001 From: Deepak moonat Date: Thu, 3 Oct 2024 10:35:19 +0530 Subject: [PATCH 35/76] update: markdown (#1206) # Description Update the author section with correct markdown format. Update Tuning section with additional info - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- ...inetuning_using_gemini_on_image_data.ipynb | 30 +++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb index a9cf0b47bb..cd32b7ac72 100644 --- a/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb +++ b/gemini/tuning/supervised_finetuning_using_gemini_on_image_data.ipynb @@ -63,9 +63,9 @@ "id": "MgVK7IeKpW27" }, "source": [ - "| | | |\n", - "|-|-|-|\n", - "|Author(s) | [Deepak Moonat](https://github.com/dmoonat)" + "| | |\n", + "|-|-|\n", + "|Author(s) | [Deepak Moonat](https://github.com/dmoonat) |" ] }, { @@ -143,11 +143,11 @@ "id": "jCMczwd00N9T" }, "source": [ - "Dataset used in this notebook is about image captioning.\n", + "Dataset used in this notebook is about image captioning. [Reference](https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma#download_the_model_checkpoint)\n", "\n", - "[Reference](https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma#download_the_model_checkpoint)\n", - "\n", - "Licensed under the Creative Commons Attribution 4.0 License" + "```\n", + "Licensed under the Creative Commons Attribution 4.0 License\n", + "```" ] }, { @@ -1484,20 +1484,32 @@ "## Fine-tune the model" ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "d1263b90fbc4" + }, + "source": [ + "When you run a supervised fine-tuning job, the model learns additional parameters that help it encode the necessary information to perform the desired task or learn the desired behavior. These parameters are used during inference. The output of the tuning job is a new model that combines the newly learned parameters with the original model." 
+ ] + }, { "cell_type": "markdown", "metadata": { "id": "EyqBRoY5rscI" }, "source": [ + "**Tuning Job parameters**\n", + "\n", "- `source_model`: Specifies the base Gemini model version you want to fine-tune.\n", "- `train_dataset`: Path to your training data in JSONL format.\n", "\n", - "
\n", "\n", - " *Optional parameters*\n", + " *Optional parameters*\n", " - `validation_dataset`: If provided, this data is used to evaluate the model during tuning.\n", " - `tuned_model_display_name`: Display name for the tuned model.\n", + " \n", + " *Hyperparameters* \n", " - `epochs`: The number of training epochs to run.\n", " - `learning_rate_multiplier`: A value to scale the learning rate during training.\n", " - `adapter_size` : Gemini 1.5 Pro supports Adapter length [1, 4], default value is 4.\n" From ab2a7dfb09d9a711ef8ae54f6b8ef4754364992f Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Thu, 3 Oct 2024 18:47:42 +0200 Subject: [PATCH 36/76] feat: vapo with custom function (#1205) # Description This notebook demonstrates how to leverage Vertex AI Prompt Optimizer (Preview) to optimize a simple prompt for a Gemini model using your own metric. Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .../prompts/prompt_optimizer/utils/helpers.py | 26 +- .../vertex_ai_prompt_optimizer_sdk.ipynb | 15 +- ...i_prompt_optimizer_sdk_custom_metric.ipynb | 1363 +++++++++++++++++ 3 files changed, 1390 insertions(+), 14 deletions(-) create mode 100644 gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb diff --git a/gemini/prompts/prompt_optimizer/utils/helpers.py b/gemini/prompts/prompt_optimizer/utils/helpers.py index c20b10269c..0bf1e639db 100644 --- a/gemini/prompts/prompt_optimizer/utils/helpers.py +++ b/gemini/prompts/prompt_optimizer/utils/helpers.py @@ -15,6 +15,7 @@ import json import random import string +import subprocess from typing import Dict, List, Optional, Tuple, Union from IPython.display import HTML, Markdown, display @@ -59,6 +60,21 @@ def get_id(length: Union[int, None] = 8) -> str: return "".join(random.choices(string.ascii_lowercase + string.digits, k=length)) +def get_auth_token() -> None: + """A function to collect the authorization token""" + try: + result = subprocess.run( + ["gcloud", "auth", "print-identity-token", "-q"], + capture_output=True, + text=True, + check=True, + ) + return result.stdout.strip() + except subprocess.CalledProcessError as e: + print(f"Error getting auth token: {e}") + return None + + @retry(wait=wait_random_exponential(multiplier=1, max=120)) async def async_generate(prompt: str, model: GenerativeModel) -> Union[str, None]: """Generate a response from the model.""" @@ -66,7 +82,7 @@ async def async_generate(prompt: str, model: GenerativeModel) -> Union[str, None [prompt], stream=False, ) - return response.text[0] if response.text else None + return response.text if response.text else None def evaluate_task( @@ -81,7 +97,7 @@ def evaluate_task( """Evaluate task using Vertex AI Evaluation.""" # Generate a unique id for the experiment run - id = get_id() + idx = get_id() # Rename the columns to match the 
expected format eval_dataset = df[[prompt_col, reference_col, response_col]].rename( @@ -108,7 +124,7 @@ def evaluate_task( ) # Evaluate the task - result = eval_task.evaluate(experiment_run_name=f"{experiment_name}-{id}") + result = eval_task.evaluate(experiment_run_name=f"{experiment_name}-{idx}") # Return the summary metrics return result.summary_metrics @@ -125,9 +141,7 @@ def print_df_rows( ) # Define the header style for the text - header_style = ( - "white-space: pre-wrap; width: 800px; overflow-x: auto; font-size: 16px;" - ) + header_style = "white-space: pre-wrap; width: 800px; overflow-x: auto; font-size: 16px; font-weight: bold;" # If columns are specified, filter the DataFrame if columns: diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb index 35f16a7d7b..762ec120c5 100644 --- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb @@ -88,8 +88,7 @@ "source": [ "### Objective\n", "\n", - "This notebook demostrates how to leverage Vertex AI Prompt Optimizer (Preview) to efficiently migrate a prompt template from one model to another. The goal is to use Vertex AI Prompt Optimizer (Preview) to find the new prompt template which generate the most correct and grounded responses.\n", - "\n", + "This notebook demostrates how to leverage Vertex AI Prompt Optimizer (Preview) to optimize a simple prompt for a Gemini model using your own metrics. The goal is to use Vertex AI Prompt Optimizer (Preview) to find the new prompt template which generate the most correct and grounded responses.\n", "\n", "This tutorial uses the following Google Cloud ML services and resources:\n", "\n", @@ -175,10 +174,7 @@ }, "outputs": [], "source": [ - "import sys\n", - "\n", - "if \"google.colab\" in sys.modules:\n", - " ! 
mkdir -p ./utils && wget https://raw.githubusercontent.com/GoogleCloudPlatform/generative-ai/main/gemini/prompts/prompt_optimizer/utils/helpers.py -P ./utils" + "! mkdir -p ./tutorial/utils && wget https://raw.githubusercontent.com/GoogleCloudPlatform/generative-ai/main/gemini/prompts/prompt_optimizer/utils/helpers.py -P ./tutorial/utils" ] }, { @@ -683,7 +679,7 @@ "id": "Rp1n1aMACzSW" }, "source": [ - "### Translate the prompt template with Vertex AI Prompt Optimizer (Preview)\n" + "### Optimize the prompt template with Vertex AI Prompt Optimizer (Preview)\n" ] }, { @@ -702,7 +698,7 @@ "\n", "Vertex AI Prompt Optimizer enables the translation and optimization of the Instruction Template, while the Task/Context Template remains essential for evaluating different instruction templates.\n", "\n", - "In this case, you want to translate a prompt\n" + "In this case, you want to enhance or optimize a simple prompt template.\n" ] }, { @@ -850,6 +846,9 @@ " source_model=\"\",\n", " source_model_qps=\"\",\n", " source_model_location=\"\",\n", + " optimizer_model=\"gemini-1.5-pro-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", + " optimizer_model_qps=1,\n", + " optimizer_model_location=\"us-central1\",\n", " eval_model=\"gemini-1.5-pro-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", " eval_qps=1,\n", " eval_model_location=\"us-central1\",\n", diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb new file mode 100644 index 0000000000..fa8b39fca6 --- /dev/null +++ 
b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb @@ -0,0 +1,1363 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Vertex Prompt Optimizer Notebook SDK (Preview) - Custom metric\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0ccc35a93b9f" + }, + "source": [ + "| | | |\n", + "|-|-|-|\n", + "| Author | [Ivan Nardini](https://github.com/inardini) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## I. Overview\n", + "\n", + "In the context of developing Generative AI (Gen AI) applications, prompt engineering poses challenges due to its time-consuming and error-prone nature. You often dedicate significant effort to crafting and inputting prompts to achieve successful task completion. Additionally, with the frequent release of foundational models, you face the additional burden of migrating working prompts from one model version to another.\n", + "\n", + "Vertex AI Prompt Optimizer aims to alleviate these challenges by providing you with an intelligent prompt optimization tool. With this tool you can both refine optimize system instruction (and task) in the prompts and selects the best demonstrations (few-shot examples) for prompt templates, empowering you to shape LLM responses from any source model to on a target Google model.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4HKyj5KwYePX" + }, + "source": [ + "### Objective\n", + "\n", + "This notebook demostrates how to leverage Vertex AI Prompt Optimizer (Preview) to optimize a simple prompt for a Gemini model using your own metric. 
The goal is to use Vertex AI Prompt Optimizer (Preview) to find the new prompt template which generates responses according to your own metric.\n", + "\n", + "\n", + "This tutorial uses the following Google Cloud services and resources:\n", + "\n", + "- Vertex AI Gen AI\n", + "- Vertex AI Prompt Optimizer (Preview)\n", + "- Vertex AI Model Eval\n", + "- Vertex AI Custom job\n", + "- Cloud Run\n", + "\n", + "The steps performed include:\n", + "\n", + "- Prepare the prompt-ground truth pairs optimized for another model\n", + "- Define the prompt template you want to optimize\n", + "- Define and deploy your own custom evaluation metric on Cloud function\n", + "- Set optimization mode and steps\n", + "- Run the automatic prompt optimization job\n", + "- Collect the best prompt template and eval metric\n", + "- Validate the best prompt template" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "08d289fa873f" + }, + "source": [ + "### Dataset\n", + "\n", + "The dataset is a question-answering dataset generated by a simple AI cooking assistant that provides suggestions on how to cook healthier dishes.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aed92deeb4a0" + }, + "source": [ + "### Costs\n", + "\n", + "This tutorial uses billable components of Google Cloud:\n", + "\n", + "* Vertex AI\n", + "* Cloud Storage\n", + "\n", + "Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing) and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## II. 
Before you start" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK for Python and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "! pip3 install --upgrade --quiet 'google-cloud-aiplatform[evaluation]' 'plotly' 'asyncio' 'tqdm' 'tenacity' 'etils' 'importlib_resources' 'fsspec' 'gcsfs' 'nbformat>=4.2.0'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "e55e2195ce2d" + }, + "outputs": [], + "source": [ + "! mkdir -p ./tutorial/utils && wget https://raw.githubusercontent.com/GoogleCloudPlatform/generative-ai/main/gemini/prompts/prompt_optimizer/utils/helpers.py -P ./tutorial/utils" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime (Colab only)\n", + "\n", + "To use the newly installed packages, you must restart the runtime on Google Colab." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " import IPython\n", + "\n", + " app = IPython.Application.instance()\n", + " app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "Authenticate your environment on Google Colab.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " try:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()\n", + " creds, project = google.auth.default()\n", + " if creds.token:\n", + " print(\"Authentication successful.\")\n", + " else:\n", + " print(\"Authentication successful, but no token was returned.\")\n", + " except Exception as e:\n", + " print(f\"Error during Colab authentication: {e}\")\n", + "\n", + "! gcloud auth login" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the following APIs](https://console.cloud.google.com/flows/enableapi?apiid=cloudresourcemanager.googleapis.com,aiplatform.googleapis.com,cloudfunctions.googleapis.com,run.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WReHDGG5g0XY" + }, + "source": [ + "#### Set your project ID and project number" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oM1iC_MfAts1" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", + "\n", + "# Set the project id\n", + "! 
gcloud config set project {PROJECT_ID}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oZpm-sL8f1z_" + }, + "outputs": [], + "source": [ + "PROJECT_NUMBER = !gcloud projects describe PROJECT_ID --format=\"get(projectNumber)\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "region" + }, + "source": [ + "#### Region\n", + "\n", + "You can also change the `REGION` variable used by Vertex AI. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "I6FmBV2_0fBP" + }, + "outputs": [], + "source": [ + "REGION = \"us-central1\" # @param {type: \"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zgPO1eR3CYjk" + }, + "source": [ + "#### Create a Cloud Storage bucket\n", + "\n", + "Create a storage bucket to store intermediate artifacts such as datasets." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MzGDU7TWdts_" + }, + "outputs": [], + "source": [ + "BUCKET_NAME = \"your-bucket-name-{PROJECT_ID}-unique\" # @param {type:\"string\"}\n", + "\n", + "BUCKET_URI = f\"gs://{BUCKET_NAME}\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NIq7R4HZCfIc" + }, + "outputs": [], + "source": [ + "! 
gsutil mb -l {REGION} -p {PROJECT_ID} {BUCKET_URI}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "set_service_account" + }, + "source": [ + "#### Service Account and permissions\n", + "\n", + "Vertex AI Automated Prompt Design requires a service account with the following permissions:\n", + "\n", + "- `Vertex AI User` to call Vertex LLM API\n", + "- `Storage Object Admin` to read and write to your GCS bucket.\n", + "- `Artifact Registry Reader` to download the pipeline template from Artifact Registry.\n", + "- `Cloud Run Developer` to deploy function on Cloud Run.\n", + "\n", + "[Check out the documentation](https://cloud.google.com/iam/docs/manage-access-service-accounts#iam-view-access-sa-gcloud) to know how to grant those permissions to a single service account.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ssUJJqXJJHgC" + }, + "outputs": [], + "source": [ + "SERVICE_ACCOUNT = f\"{PROJECT_NUMBER}-compute@developer.gserviceaccount.com\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wqOHg5aid6HP" + }, + "outputs": [], + "source": [ + "for role in ['aiplatform.user', 'storage.objectAdmin', 'artifactregistry.reader', 'run.developer', 'run.invoker']:\n", + " \n", + " ! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", + " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", + " --role=roles/{role}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ek1-iTbPjzdJ" + }, + "source": [ + "### Set tutorial folder and workspace\n", + "\n", + "Set a folder to collect data and any tutorial artifacts." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BbfKRabXj3la" + }, + "outputs": [], + "source": [ + "from pathlib import Path as path\n", + "\n", + "ROOT_PATH = path.cwd()\n", + "TUTORIAL_PATH = ROOT_PATH / \"tutorial\"\n", + "CONFIG_PATH = TUTORIAL_PATH / \"config\"\n", + "TUNED_PROMPT_PATH = TUTORIAL_PATH / \"tuned_prompts\"\n", + "BUILD_PATH = TUTORIAL_PATH / \"build\"\n", + "\n", + "TUTORIAL_PATH.mkdir(parents=True, exist_ok=True)\n", + "CONFIG_PATH.mkdir(parents=True, exist_ok=True)\n", + "TUNED_PROMPT_PATH.mkdir(parents=True, exist_ok=True)\n", + "BUILD_PATH.mkdir(parents=True, exist_ok=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BaNdfftpXTIX" + }, + "source": [ + "Set the associated workspace on Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "joJPc3FmX1fk" + }, + "outputs": [], + "source": [ + "from etils import epath\n", + "\n", + "WORKSPACE_URI = epath.Path(BUCKET_URI) / \"prompt_migration_gemini\"\n", + "INPUT_DATA_URI = epath.Path(WORKSPACE_URI) / \"data\"\n", + "\n", + "WORKSPACE_URI.mkdir(parents=True, exist_ok=True)\n", + "INPUT_DATA_URI.mkdir(parents=True, exist_ok=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "960505627ddf" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PyQmSRbKA8r-" + }, + "outputs": [], + "source": [ + "from argparse import Namespace\n", + "import json\n", + "import logging\n", + "\n", + "# General\n", + "from pprint import pprint\n", + "import warnings\n", + "\n", + "from google.cloud import aiplatform\n", + "import pandas as pd\n", + "import requests\n", + "from tutorial.utils.helpers import (\n", + " async_generate,\n", + " display_eval_report,\n", + " get_auth_token,\n", + " get_id,\n", + " get_optimization_result,\n", + " get_results_file_uris,\n", + " init_new_model,\n", + " 
print_df_rows,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "820DIvw1o8tB" + }, + "source": [ + "### Libraries settings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HKc4ZdUBo_SM" + }, + "outputs": [], + "source": [ + "warnings.filterwarnings(\"ignore\")\n", + "logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.ERROR)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gxc7q4r-DFH4" + }, + "source": [ + "### Define constants" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0Y5t67f3DHNm" + }, + "outputs": [], + "source": [ + "INPUT_DATA_FILE_URI = \"gs://github-repo/prompts/prompt_optimizer/rag_qa_dataset.jsonl\"\n", + "\n", + "EXPERIMENT_NAME = \"qa-prompt-eval\"\n", + "INPUT_TUNING_DATA_URI = epath.Path(WORKSPACE_URI) / \"tuning_data\"\n", + "INPUT_TUNING_DATA_FILE_URI = str(INPUT_DATA_URI / \"prompt_tuning.jsonl\")\n", + "OUTPUT_TUNING_DATA_URI = epath.Path(WORKSPACE_URI) / \"tuned_prompt\"\n", + "APD_CONTAINER_URI = (\n", + " \"us-docker.pkg.dev/vertex-ai-restricted/builtin-algorithm/apd:preview_v1_0\"\n", + ")\n", + "CONFIG_FILE_URI = str(WORKSPACE_URI / \"config\" / \"config.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "init_aip:mbsdk,all" + }, + "source": [ + "### Initialize Vertex AI SDK for Python\n", + "\n", + "Initialize the Vertex AI SDK for Python for your project." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bQMc2Uwf0fBQ" + }, + "outputs": [], + "source": [ + "aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EdvJRUWRNGHE" + }, + "source": [ + "## III. 
Automated prompt design with Vertex AI Prompt Optimizer (Preview)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mmTotjRAJplw" + }, + "source": [ + "### Load the dataset\n", + "\n", + "Load the dataset from Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LA7aG08wJtVm" + }, + "outputs": [], + "source": [ + "prompt_tuning_df = pd.read_json(INPUT_DATA_FILE_URI, lines=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1xn-pz3v5HVK" + }, + "outputs": [], + "source": [ + "prompt_tuning_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PsXdJBJXiaVH" + }, + "outputs": [], + "source": [ + "print_df_rows(prompt_tuning_df, n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Rp1n1aMACzSW" + }, + "source": [ + "### Enhance the prompt template with Vertex AI Prompt Optimizer (Preview) with custom metric\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "h1650lf3X8xW" + }, + "source": [ + "#### Prepare the prompt template you want to optimize\n", + "\n", + "A prompt consists of two key parts:\n", + "\n", + "* **System Instruction Template** which is a fixed part of the prompt shared across all queries for a given task.\n", + "\n", + "* **Prompt Template** which is a dynamic part of the prompt that changes based on the task.\n", + "\n", + "Vertex AI Prompt Optimizer enables the translation and optimization of the Instruction Template, while the Task/Context Template remains essential for evaluating different instruction templates.\n", + "\n", + "In this case, you want to translate a prompt\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Db8rHNC6DmtY" + }, + "outputs": [], + "source": [ + "SYSTEM_INSTRUCTION_TEMPLATE = \"\"\"\n", + "Given a question with some context, provide the correct answer to the question.\n", + "\"\"\"\n", + 
"\n", + "PROMPT_TEMPLATE = \"\"\"\n", + "Some examples of correct answer to a question with context are:\n", + "Question: {{question}}\n", + "Answer: {{target}}\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a1TCgXsrXztm" + }, + "source": [ + "#### Prepare few samples\n", + "\n", + "Vertex AI Prompt optimizer requires a CSV or JSONL file containing labeled samples.\n", + "\n", + "For **prompt optimization**:\n", + "\n", + "* Focus on examples that specifically demonstrate the issues you want to address.\n", + "* Recommendation: Use 50-100 distinct samples for reliable results. However, the tool can still be effective with as few as 5 samples.\n", + "\n", + "For **prompt translation**:\n", + "\n", + "* Consider using the source model to label examples that the target model struggles with, helping to identify areas for improvement.\n", + "\n", + "Learn more about setting up your CSV or JSONL file as input [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vTIl_v9Ig1F-" + }, + "outputs": [], + "source": [ + "prepared_prompt_tuning_df = prompt_tuning_df.copy()\n", + "\n", + "# Prepare question and target columns\n", + "prepared_prompt_tuning_df[\"question\"] = (\n", + " prepared_prompt_tuning_df[\"user_question\"]\n", + " + \"\\nnContext:\\n\"\n", + " + prepared_prompt_tuning_df[\"context\"]\n", + ")\n", + "prepared_prompt_tuning_df = prepared_prompt_tuning_df.rename(\n", + " columns={\"reference\": \"target\"}\n", + ")\n", + "\n", + "# Remove uneccessary columns\n", + "prepared_prompt_tuning_df = prepared_prompt_tuning_df.drop(\n", + " columns=[\"user_question\", \"context\", \"prompt\", \"answer\"]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_DUFEAb82eEi" + }, + "outputs": [], + "source": [ + "prepared_prompt_tuning_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nF3XY_d_yB-K" + }, + "source": [ + "#### Upload samples to bucket\n", + "\n", + "Once you prepare samples, you can upload them on Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "155paLgGUXOm" + }, + "outputs": [], + "source": [ + "prepared_prompt_tuning_df.to_json(\n", + " INPUT_TUNING_DATA_FILE_URI, orient=\"records\", lines=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hxpid3KgAkYM" + }, + "source": [ + "#### Define and deploy your own custom optimization metric on Cloud function\n", + "\n", + "To optimize your prompt template using a custom optimization metric, you need to deploy a function with your own metric code on Cloud function. To deploy a Cloud function with your own custom metric, you cover the following steps:\n", + "\n", + "1. Define requirements\n", + "2. Write your own custom metric function code\n", + "3. 
Deploy the custom code as Cloud function\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Nxh2e88fAnQc" + }, + "source": [ + "##### Define requirements\n", + "\n", + "Set the custom metric dependencies." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "q-hUlhgBCus4" + }, + "outputs": [], + "source": [ + "requirements = \"\"\"\n", + "functions-framework==3.*\n", + "google-cloud-aiplatform\n", + "\"\"\"\n", + "\n", + "with open(BUILD_PATH / \"requirements.txt\", \"w\") as f:\n", + " f.write(requirements)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k_EFZEBeEy48" + }, + "source": [ + "##### Write your own custom metric function\n", + "\n", + "Define the module which contains your own custom metric function definition.\n", + "\n", + "It is important to highlight that you need to retrieve the input data using `request.get_json()` as shown below. This will return a json dict. The `response` field will be provided by the service which contains the LLM output.\n", + "\n", + "Also you have to return a json serialized dict with two fields: `custom metric name` you specified, and the `explanation` to correctly optimize the prompt template with your own metric.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1wGoVQNCMbxe" + }, + "outputs": [], + "source": [ + "custom_metric_function_code = '''\n", + "\"\"\"\n", + "This module contains the custom evaluation metric definition to optimize a prompt template with Vertex AI Prompt Optimizer\n", + "\"\"\"\n", + "\n", + "from typing import Dict\n", + "from vertexai.generative_models import (\n", + " GenerationConfig,\n", + " GenerativeModel,\n", + " HarmBlockThreshold,\n", + " HarmCategory,\n", + ")\n", + "\n", + "import json\n", + "import functions_framework\n", + "\n", + "def get_autorater_response(metric_prompt: str) -> dict:\n", + " \"\"\"This function is to generate the evaluation response from the 
autorater.\"\"\"\n", + "\n", + " metric_response_schema = {\n", + " \"type\": \"OBJECT\",\n", + " \"properties\": {\n", + " \"score\": {\"type\": \"NUMBER\"},\n", + " \"explanation\": {\"type\": \"STRING\"},\n", + " },\n", + " \"required\": [\"score\", \"explanation\"],\n", + " }\n", + "\n", + " autorater = GenerativeModel(\n", + " \"gemini-1.5-pro\",\n", + " generation_config=GenerationConfig(\n", + " response_mime_type=\"application/json\",\n", + " response_schema=metric_response_schema,\n", + " ),\n", + " safety_settings={\n", + " HarmCategory.HARM_CATEGORY_UNSPECIFIED: HarmBlockThreshold.BLOCK_NONE,\n", + " HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n", + " HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,\n", + " HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,\n", + " HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,\n", + " },\n", + " )\n", + "\n", + " response = autorater.generate_content(metric_prompt)\n", + "\n", + " response_json = {}\n", + " response_json = json.loads(part.text)\n", + " return response_json\n", + "\n", + "\n", + "# Define custom evaluation criteria\n", + "def evaluate_engagement_personalization_fn(question: str, response:str, target: str) -> Dict[str, str]:\n", + " \"\"\"Evaluates an AI-generated response for User Engagement and Personalization.\"\"\"\n", + "\n", + " custom_metric_prompt_template = \"\"\"\n", + "\n", + " # Instruction\n", + " You are an expert evaluator. 
Your task is to evaluate the quality of the LLM-generated responses against a reference target response.\n", + " You should first read the Question carefully, and then evaluate the quality of the responses based on the Criteria provided in the Evaluation section below.\n", + " You will assign the response a rating following the Rating Rubric only and an step-by-step explanation for your rating.\n", + "\n", + " # Evaluation\n", + "\n", + " ## Criteria\n", + " Relevance and Customization: The response should directly address the user's query and demonstrate an understanding of their specific needs or preferences, such as dietary restrictions, skill level, or taste preferences.\n", + " Interactivity and Proactiveness: The response should go beyond simply answering the question by actively encouraging further interaction through follow-up questions, suggestions for additional exploration, or prompts for more information to provide a tailored experience.\n", + " Tone and Empathy: The response should adopt an appropriate and empathetic tone that fosters a positive and supportive user experience, making the user feel heard and understood.\n", + "\n", + " ## Rating rubric\n", + " 1 - Minimal: The response lacks personalization and demonstrates minimal engagement with the user. The tone may be impersonal or generic.\n", + " 2 - Basic: The response shows some basic personalization but lacks depth or specificity. Engagement is limited, possibly with generic prompts or suggestions. The tone is generally neutral but may lack warmth or empathy.\n", + " 3 - Moderate: The response demonstrates clear personalization and attempts to engage the user with relevant follow-up questions or prompts based on their query. The tone is friendly and supportive, fostering a positive user experience.\n", + " 4 - High: The response demonstrates a high degree of personalization and actively engages the user with relevant follow-up questions or prompts. 
The tone is empathetic and understanding, creating a strong connection with the user.\n", + " 5 - Exceptional: The response goes above and beyond to personalize the experience, anticipating user needs, and fostering a genuine connection. The tone is warm, encouraging, and inspiring, leaving the user feeling empowered and motivated.\n", + "\n", + " ## Evaluation steps\n", + " Step 1: Carefully read both the question and the generated response. Ensure a clear understanding of the user's intent, needs, and any specific context provided.\n", + " Step 2: Evaluate how well the response directly addresses the user's query and demonstrates an understanding of their specific needs or preferences.\n", + " Step 3: Determine the extent to which the response actively encourages further interaction and provides a tailored experience.\n", + " Step 4: Evaluate Tone & Empathy: Analyze the tone of the response, ensuring it fosters a positive and supportive user experience, making the user feel heard and understood.\n", + " Step 5: Based on the three criteria above, assign a score from 1 to 5 according to the score rubric.\n", + " Step 5: Justify the assigned score with a clear and concise explanation, highlighting the strengths and weaknesses of the response with respect to each criterion.\n", + "\n", + " # Question : {question}\n", + " # Generated response: {response}\n", + " # Reference response: {target}\n", + " \"\"\"\n", + "\n", + " custom_metric_prompt = custom_metric_prompt_template.format(question=question, response=response, target=target)\n", + " response_dict = get_autorater_response(custom_metric_prompt)\n", + "\n", + " return {\n", + " \"custom_metric\": response_dict[\"score\"],\n", + " \"explanation\": response_dict[\"explanation\"],\n", + " }\n", + "\n", + "# Register an HTTP function with the Functions Framework\n", + "@functions_framework.http\n", + "def main(request):\n", + " request_json = request.get_json(silent=True)\n", + "\n", + " if not request_json:\n", + " 
raise ValueError('Cannot find request json.')\n", + "\n", + " question = request_json['question']\n", + " response = request_json['response']\n", + " reference = request_json['target']\n", + "\n", + " get_evaluation_result = evaluate_engagement_personalization_fn(question, response, reference)\n", + " return json.dumps(get_evaluation_result)\n", + "'''\n", + "\n", + "with open(BUILD_PATH / \"main.py\", \"w\") as f:\n", + " f.write(custom_metric_function_code)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T7R0LDZMCPnL" + }, + "source": [ + "##### Deploy the custom metric as a Cloud Function\n", + "\n", + "Use gcloud command line to deploy the cloud function. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nwBZGvkLCizs" + }, + "outputs": [], + "source": [ + "!gcloud functions deploy 'custom_engagement_personalization_metric' \\\n", + " --gen2 \\\n", + " --runtime=\"python310\" \\\n", + " --source={str(BUILD_PATH)} \\\n", + " --entry-point=main \\\n", + " --trigger-http \\\n", + " --timeout=3600 \\\n", + " --memory=2Gb \\\n", + " --concurrency=6 \\\n", + " --min-instances=6 \\\n", + " --project {PROJECT_ID} \\\n", + " --region={REGION} \\\n", + " --quiet" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FoEypczSwAGK" + }, + "source": [ + "##### Test your custom evaluation function\n", + "\n", + "Submit a request to validate the output of the custom evaluation function. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HXOWYp2MwEsA" + }, + "outputs": [], + "source": [ + "custom_evaluator_function_uri = ! 
gcloud functions describe 'custom_engagement_personalization_metric' --gen2 --region {REGION} --format=\"value(url)\"\n", + "custom_evaluator_function_uri = custom_evaluator_function_uri[0].strip()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0JMeIyx0DHnc" + }, + "outputs": [], + "source": [ + "headers = {\n", + " \"Authorization\": f\"Bearer {get_auth_token()}\",\n", + " \"Content-Type\": \"application/json\",\n", + "}\n", + "\n", + "json_data = {\n", + " \"question\": \"\"\"\n", + " What are some techniques for cooking red meat and pork that maximize flavor and tenderness while minimizing the formation of unhealthy compounds?\n", + " \"\"\",\n", + " \"response\": \"\"\"\n", + " * Marinating in acidic ingredients like lemon juice or vinegar to tenderize the meat \\n * Cooking to an internal temperature of 145°F (63°C) for safety \\n * Using high-heat cooking methods like grilling and pan-searing for browning and caramelization /n * Avoiding charring to minimize the formation of unhealthy compounds\n", + " \"\"\",\n", + " \"target\": \"\"\"\n", + " Here's how to tackle those delicious red meats and pork while keeping things healthy:\n", + " **Prioritize Low and Slow:**\n", + " * **Braising and Stewing:** These techniques involve gently simmering meat in liquid over low heat for an extended period. This breaks down tough collagen, resulting in incredibly tender and flavorful meat. Plus, since the cooking temperature is lower, it minimizes the formation of potentially harmful compounds associated with high-heat cooking.\n", + " * **Sous Vide:** This method involves sealing meat in a vacuum bag and immersing it in a precisely temperature-controlled water bath. It allows for even cooking to the exact desired doneness, resulting in incredibly juicy and tender meat. 
Because the temperature is controlled and lower than traditional methods, it can be a healthier option.\n", + " **High Heat Tips:**\n", + " * **Marinades are Your Friend:** As you mentioned, acidic marinades tenderize meat. They also add flavor!\n", + " * **Temperature Control is Key:** Use a meat thermometer to ensure you reach the safe internal temperature of 145°F (63°C) without overcooking.\n", + " * **Don't Burn It!** While some browning is desirable, charring creates those unhealthy compounds. Pat meat dry before cooking to minimize steaming and promote browning. Let the pan heat up properly before adding the meat to achieve a good sear.\n", + "\n", + " **Remember:** Trim visible fat before cooking to reduce saturated fat content. Let meat rest after cooking; this allows juices to redistribute, resulting in a more tender and flavorful final product.\n", + " \"\"\",\n", + "}\n", + "\n", + "response = requests.post(\n", + " custom_evaluator_function_uri, headers=headers, json=json_data, timeout=70\n", + ").json()\n", + "pprint(response)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F5RD0l2xX-FI" + }, + "source": [ + "#### Configure optimization settings\n", + "\n", + "Vertex AI Prompt Optimizer allows you to optimize prompts by optimizing instructions only, demonstration only, or both (`optimization_mode`), and after you set the system instruction, prompt templates that will be optimized (`system_instruction`, `prompt_template`), and the model you want to optimize for (`target_model`), it allows to condition the optimization process by setting metrics, number of iterations used to improve the prompt and more.\n", + "\n", + "In this scenario, you set two parameters:\n", + "\n", + "* `custom_metric_name` parameter which allows you to pass your own custom metric to optimizer the prompt template.\n", + "* `custom_metric_cloud_function_name` parameter which indicates the Cloud function to call for collecting custom function evaluation metric 
output.\n", + "\n", + "For additional configurations, check out the documentation [here](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sFHutXhgeqRx" + }, + "outputs": [], + "source": [ + "PROMPT_OPTIMIZATION_JOB = \"auto-prompt-design-job-\" + get_id()\n", + "OUTPUT_TUNING_RUN_URI = str(OUTPUT_TUNING_DATA_URI / PROMPT_OPTIMIZATION_JOB)\n", + "\n", + "args = Namespace(\n", + " # Basic configuration\n", + " system_instruction=SYSTEM_INSTRUCTION_TEMPLATE,\n", + " prompt_template=PROMPT_TEMPLATE,\n", + " target_model=\"gemini-1.5-flash-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", + " optimization_mode=\"instruction\", # Supported modes: \"instruction\", \"demonstration\", \"instruction_and_demo\"\n", + " custom_metric_name=\"custom_metric\",\n", + " custom_metric_cloud_function_name=\"custom_engagement_personalization_metric\",\n", + " num_steps=3,\n", + " num_template_eval_per_step=2,\n", + " num_demo_set_candidates=3,\n", + " demo_set_size=2,\n", + " input_data_path=INPUT_TUNING_DATA_FILE_URI,\n", + " output_path=OUTPUT_TUNING_RUN_URI,\n", + " project=PROJECT_ID,\n", + " # Advanced configuration\n", + " target_model_qps=1,\n", + " target_model_location=\"us-central1\",\n", + " source_model=\"\",\n", + " source_model_qps=\"\",\n", + " source_model_location=\"\",\n", + " optimizer_model=\"gemini-1.5-pro-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", + " optimizer_model_qps=1,\n", + " optimizer_model_location=\"us-central1\",\n", + " 
eval_model=\"gemini-1.5-pro-001\", # Supported models: \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n", + " eval_qps=1,\n", + " eval_model_location=\"us-central1\",\n", + " eval_metrics_types=[\n", + " \"question_answering_correctness\",\n", + " \"custom_metric\",\n", + " ], # Supported metrics: \"bleu\", \"coherence\", \"exact_match\", \"fluidity\", \"fulfillment\", \"groundedness\", \"rouge_1\", \"rouge_2\", \"rouge_l\", \"rouge_l_sum\", \"safety\", \"question_answering_correctness\", \"question_answering_helpfulness\", \"question_answering_quality\", \"question_answering_relevance\", \"summarization_helpfulness\", \"summarization_quality\", \"summarization_verbosity\", \"tool_name_match\", \"tool_parameter_key_match\", \"tool_parameter_kv_match\"\n", + " eval_metrics_weights=[0.8, 0.2],\n", + " aggregation_type=\"weighted_sum\", # Supported aggregation types: \"weighted_sum\", \"weighted_average\"\n", + " data_limit=50,\n", + " response_mime_type=\"application/json\",\n", + " language=\"English\", # Supported languages: \"English\", \"French\", \"German\", \"Hebrew\", \"Hindi\", \"Japanese\", \"Korean\", \"Portuguese\", \"Simplified Chinese\", \"Spanish\", \"Traditional Chinese\"\n", + " placeholder_to_content=json.loads(\"{}\"),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Jd_uzQYQx6L7" + }, + "source": [ + "#### Upload Vertex AI Prompt Optimizer (Preview) config to Cloud Storage\n", + "\n", + "After you define Vertex AI Prompt Optimizer (Preview) configuration, you upload them on Cloud Storage bucket.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QCJAqcfWBqAh" + }, + "source": [ + "Now you can save the config to the bucket." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iqiv8ApR_SAM" + }, + "outputs": [], + "source": [ + "args = vars(args)\n", + "\n", + "with epath.Path(CONFIG_FILE_URI).open(\"w\") as config_file:\n", + " json.dump(args, config_file)\n", + "config_file.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "spqgBT8hYAle" + }, + "source": [ + "#### Run the automatic prompt optimization job\n", + "\n", + "Now you are ready to run your first Vertex AI Prompt Optimizer (Preview) job using the Vertex AI SDK for Python.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GtPnvKIpUQ3q" + }, + "outputs": [], + "source": [ + "WORKER_POOL_SPECS = [\n", + " {\n", + " \"machine_spec\": {\n", + " \"machine_type\": \"n1-standard-4\",\n", + " },\n", + " \"replica_count\": 1,\n", + " \"container_spec\": {\n", + " \"image_uri\": APD_CONTAINER_URI,\n", + " \"args\": [\"--config=\" + CONFIG_FILE_URI],\n", + " },\n", + " }\n", + "]\n", + "\n", + "custom_job = aiplatform.CustomJob(\n", + " display_name=PROMPT_OPTIMIZATION_JOB,\n", + " worker_pool_specs=WORKER_POOL_SPECS,\n", + ")\n", + "\n", + "custom_job.run(service_account=SERVICE_ACCOUNT)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3YwwKBhtJ4ut" + }, + "source": [ + "### Collect the optimization results\n", + "\n", + "After the optimization job successfully run, you collect the optimized templates and evaluation results for the instruction\n", + "\n", + "Below you use a helper function to read the optimal system instruction template and the associated evaluation metrics." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xTPJsvg-kzkO" + }, + "outputs": [], + "source": [ + "apd_result_uris = get_results_file_uris(\n", + " output_uri=OUTPUT_TUNING_RUN_URI,\n", + " required_files=[\"eval_results.json\", \"templates.json\"],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PrezXkBUu1s5" + }, + "outputs": [], + "source": [ + "best_prompt_df, prompt_summary_df, prompt_metrics_df = get_optimization_result(\n", + " apd_result_uris[\"instruction_templates\"],\n", + " apd_result_uris[\"instruction_eval_results\"],\n", + ")\n", + "\n", + "display_eval_report(\n", + " (best_prompt_df, prompt_summary_df, prompt_metrics_df),\n", + " prompt_component=\"instruction\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TrMrbcA5Gzep" + }, + "source": [ + "### Validate and evaluate the optimized template in question-answering task\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bGRELw3U3I28" + }, + "source": [ + "#### Generate new responses using the optimized template\n", + "\n", + "Then, you generate the new responses with the optimized template. Below you can see an example of a generated response using the optimized system instructions template." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GXDU_ydAG5ak" + }, + "outputs": [], + "source": [ + "optimized_prompt_template = (\n", + " best_prompt_df[\"prompt\"].iloc[0]\n", + " + \"\\nQuestion: \\n{question}\"\n", + " + \"\\nContext: \\n{context}\"\n", + " + \"\\nAnswer:\"\n", + ")\n", + "\n", + "optimized_prompts = [\n", + " optimized_prompt_template.format(question=q, context=c)\n", + " for q, c in zip(\n", + " prompt_tuning_df[\"user_question\"].to_list(),\n", + " prompt_tuning_df[\"context\"].to_list(),\n", + " )\n", + "]\n", + "\n", + "prompt_tuning_df[\"optimized_prompt_with_vapo\"] = optimized_prompts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qG6QJW8alttS" + }, + "outputs": [], + "source": [ + "gemini_llm = init_new_model(\"gemini-1.5-flash-001\")\n", + "\n", + "gemini_predictions = [async_generate(p, model=gemini_llm) for p in optimized_prompts]\n", + "\n", + "gemini_predictions_col = await tqdm_asyncio.gather(*gemini_predictions)\n", + "\n", + "prompt_tuning_df[\"gemini_answer_with_vapo\"] = gemini_predictions_col" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4sGDKpXU-SqG" + }, + "source": [ + "#### Evaluate the quality of generated responses with the optimized instruction\n", + "\n", + "Finally, you evaluate generated responses with the optimized instruction qualitatively. If you want to know how to evaluate the new generated responses quantitatively, check out [the SDK notebook](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/gemini/prompts/prompt_optimizer) in the official repo." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_55cHbD4kFAz" + }, + "outputs": [], + "source": [ + "print_df_rows(prompt_tuning_df, n=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2a4e033321ad" + }, + "source": [ + "## IV. 
Clean up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WRY_3wh1GVNm" + }, + "outputs": [], + "source": [ + "delete_bucket = False\n", + "delete_job = False\n", + "delete_experiment = False\n", + "delete_tutorial = False\n", + "\n", + "if delete_bucket:\n", + " ! gsutil rm -r $BUCKET_URI\n", + "\n", + "if delete_job:\n", + " custom_job.delete()\n", + "\n", + "if delete_experiment:\n", + " experiment = aiplatform.Experiment(experiment_name=EXPERIMENT_NAME)\n", + " experiment.delete()\n", + "\n", + "if delete_tutorial:\n", + " import shutil\n", + "\n", + " shutil.rmtree(str(TUTORIAL_PATH))" + ] + } + ], + "metadata": { + "colab": { + "name": "vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 35cb6c210fb288e06591cff3fed8a10ce295c7d9 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Thu, 3 Oct 2024 12:20:02 -0500 Subject: [PATCH 37/76] fix: Add terms to spelling allowlist (#1208) --- .github/actions/spelling/allow.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 8fc2c985e3..852e78d42c 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -368,8 +368,10 @@ embs embvs emojis ename +epath epoc etf +etils eur evals faiss @@ -390,6 +392,7 @@ freedraw freopen fromarray fromiter +fsspec fts fulltext funtion From 8daeb652be36800a35470cf1e59e376a94197539 Mon Sep 17 00:00:00 2001 From: Ariel Jassan Date: Thu, 3 Oct 2024 20:50:29 +0300 Subject: [PATCH 38/76] refactor: move notebook to gemini/evaluation folder (#1201) Move notebook to gemini/evaluation folder --------- Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Co-authored-by: Ivan Nardini 
<88703814+inardini@users.noreply.github.com> --- ...rompts_at_scale_with_gemini_batch_prediction_api.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename gemini/{prompts => evaluation}/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb (98%) diff --git a/gemini/prompts/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb b/gemini/evaluation/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb similarity index 98% rename from gemini/prompts/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb rename to gemini/evaluation/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb index 47b3ebc633..1f49507196 100644 --- a/gemini/prompts/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb +++ b/gemini/evaluation/evaluating_prompts_at_scale_with_gemini_batch_prediction_api.ipynb @@ -33,22 +33,22 @@ "\n", "\n", " \n", " \n", " \n", " \n", From 57d6191fd4f948ce3e92229a91bced69e2e0c499 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 3 Oct 2024 23:11:16 +0200 Subject: [PATCH 39/76] chore(deps): update all non-major dependencies (#1199) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | Type | Update | |---|---|---|---|---|---|---|---| | [@angular-devkit/build-angular](https://redirect.github.com/angular/angular-cli) | [`17.3.9` -> `17.3.10`](https://renovatebot.com/diffs/npm/@angular-devkit%2fbuild-angular/17.3.9/17.3.10) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@angular-devkit%2fbuild-angular/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@angular-devkit%2fbuild-angular/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@angular-devkit%2fbuild-angular/17.3.9/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) 
| [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@angular-devkit%2fbuild-angular/17.3.9/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | patch | | [@angular/cli](https://redirect.github.com/angular/angular-cli) | [`17.3.9` -> `17.3.10`](https://renovatebot.com/diffs/npm/@angular%2fcli/17.3.9/17.3.10) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@angular%2fcli/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@angular%2fcli/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@angular%2fcli/17.3.9/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@angular%2fcli/17.3.9/17.3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | patch | | [@angular/cli](https://redirect.github.com/angular/angular-cli) | [`18.2.4` -> `18.2.6`](https://renovatebot.com/diffs/npm/@angular%2fcli/18.2.4/18.2.6) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@angular%2fcli/18.2.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@angular%2fcli/18.2.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@angular%2fcli/18.2.4/18.2.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@angular%2fcli/18.2.4/18.2.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [@floating-ui/dom](https://floating-ui.com) ([source](https://redirect.github.com/floating-ui/floating-ui/tree/HEAD/packages/dom)) | [`1.6.10` -> 
`1.6.11`](https://renovatebot.com/diffs/npm/@floating-ui%2fdom/1.6.10/1.6.11) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@floating-ui%2fdom/1.6.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@floating-ui%2fdom/1.6.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@floating-ui%2fdom/1.6.10/1.6.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@floating-ui%2fdom/1.6.10/1.6.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [@google-cloud/discoveryengine](https://redirect.github.com/googleapis/google-cloud-node/tree/main/packages/google-cloud-discoveryengine) ([source](https://redirect.github.com/googleapis/google-cloud-node/tree/HEAD/packages/google-cloud-discoveryengine)) | [`1.13.0` -> `1.14.0`](https://renovatebot.com/diffs/npm/@google-cloud%2fdiscoveryengine/1.13.0/1.14.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@google-cloud%2fdiscoveryengine/1.14.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@google-cloud%2fdiscoveryengine/1.14.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@google-cloud%2fdiscoveryengine/1.13.0/1.14.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@google-cloud%2fdiscoveryengine/1.13.0/1.14.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [@google-cloud/storage](https://redirect.github.com/googleapis/nodejs-storage) | [`7.12.1` -> `7.13.0`](https://renovatebot.com/diffs/npm/@google-cloud%2fstorage/7.12.1/7.13.0) | 
[![age](https://developer.mend.io/api/mc/badges/age/npm/@google-cloud%2fstorage/7.13.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@google-cloud%2fstorage/7.13.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@google-cloud%2fstorage/7.12.1/7.13.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@google-cloud%2fstorage/7.12.1/7.13.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [@google-cloud/vertexai](https://redirect.github.com/googleapis/nodejs-vertexai) | [`1.7.0` -> `1.8.1`](https://renovatebot.com/diffs/npm/@google-cloud%2fvertexai/1.7.0/1.8.1) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@google-cloud%2fvertexai/1.8.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@google-cloud%2fvertexai/1.8.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@google-cloud%2fvertexai/1.7.0/1.8.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@google-cloud%2fvertexai/1.7.0/1.8.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [@openapitools/openapi-generator-cli](https://redirect.github.com/OpenAPITools/openapi-generator-cli) | [`2.13.5` -> `2.13.12`](https://renovatebot.com/diffs/npm/@openapitools%2fopenapi-generator-cli/2.13.5/2.13.12) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@openapitools%2fopenapi-generator-cli/2.13.12?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@openapitools%2fopenapi-generator-cli/2.13.12?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@openapitools%2fopenapi-generator-cli/2.13.5/2.13.12?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@openapitools%2fopenapi-generator-cli/2.13.5/2.13.12?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [@sveltejs/adapter-auto](https://kit.svelte.dev) ([source](https://redirect.github.com/sveltejs/kit/tree/HEAD/packages/adapter-auto)) | [`3.2.4` -> `3.2.5`](https://renovatebot.com/diffs/npm/@sveltejs%2fadapter-auto/3.2.4/3.2.5) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@sveltejs%2fadapter-auto/3.2.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@sveltejs%2fadapter-auto/3.2.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@sveltejs%2fadapter-auto/3.2.4/3.2.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@sveltejs%2fadapter-auto/3.2.4/3.2.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | patch | | [@sveltejs/adapter-static](https://kit.svelte.dev) ([source](https://redirect.github.com/sveltejs/kit/tree/HEAD/packages/adapter-static)) | [`3.0.4` -> `3.0.5`](https://renovatebot.com/diffs/npm/@sveltejs%2fadapter-static/3.0.4/3.0.5) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@sveltejs%2fadapter-static/3.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@sveltejs%2fadapter-static/3.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@sveltejs%2fadapter-static/3.0.4/3.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@sveltejs%2fadapter-static/3.0.4/3.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | patch | | [@sveltejs/kit](https://kit.svelte.dev) ([source](https://redirect.github.com/sveltejs/kit/tree/HEAD/packages/kit)) | [`2.5.26` -> `2.6.1`](https://renovatebot.com/diffs/npm/@sveltejs%2fkit/2.5.26/2.6.1) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@sveltejs%2fkit/2.6.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@sveltejs%2fkit/2.6.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@sveltejs%2fkit/2.5.26/2.6.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@sveltejs%2fkit/2.5.26/2.6.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | minor | | [@types/lodash](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/lodash) ([source](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/lodash)) | [`4.17.7` -> `4.17.9`](https://renovatebot.com/diffs/npm/@types%2flodash/4.17.7/4.17.9) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@types%2flodash/4.17.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@types%2flodash/4.17.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@types%2flodash/4.17.7/4.17.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@types%2flodash/4.17.7/4.17.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | patch | | [@types/pg](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/pg) ([source](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/pg)) | [`8.11.8` -> `8.11.10`](https://renovatebot.com/diffs/npm/@types%2fpg/8.11.8/8.11.10) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@types%2fpg/8.11.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@types%2fpg/8.11.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@types%2fpg/8.11.8/8.11.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@types%2fpg/8.11.8/8.11.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | patch | | [SQLAlchemy](https://www.sqlalchemy.org) ([changelog](https://docs.sqlalchemy.org/en/latest/changelog/)) | `==2.0.34` -> `==2.0.35` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/SQLAlchemy/2.0.35?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/SQLAlchemy/2.0.35?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/SQLAlchemy/2.0.34/2.0.35?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/SQLAlchemy/2.0.34/2.0.35?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | 
patch | | [aiohttp](https://redirect.github.com/aio-libs/aiohttp) | `3.10.5` -> `3.10.8` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/aiohttp/3.10.8?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/aiohttp/3.10.8?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/aiohttp/3.10.5/3.10.8?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/aiohttp/3.10.5/3.10.8?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [altair](https://redirect.github.com/vega/altair) | `5.3.0` -> `5.4.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/altair/5.4.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/altair/5.4.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/altair/5.3.0/5.4.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/altair/5.3.0/5.4.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [anthropic](https://redirect.github.com/anthropics/anthropic-sdk-python) | `^0.31.2` -> `^0.34.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/anthropic/0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/anthropic/0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/anthropic/0.31.2/0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/anthropic/0.31.2/0.34.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | antlr4-python3-runtime | `4.9.3` -> `4.13.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/antlr4-python3-runtime/4.13.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/antlr4-python3-runtime/4.13.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/antlr4-python3-runtime/4.9.3/4.13.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/antlr4-python3-runtime/4.9.3/4.13.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [anyio](https://redirect.github.com/agronholm/anyio) ([changelog](https://anyio.readthedocs.io/en/stable/versionhistory.html)) | `4.4.0` -> `4.6.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/anyio/4.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/anyio/4.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/anyio/4.4.0/4.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/anyio/4.4.0/4.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | bcrypt | `4.1.3` -> `4.2.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/bcrypt/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/bcrypt/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/bcrypt/4.1.3/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/bcrypt/4.1.3/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [build](https://redirect.github.com/pypa/build) ([changelog](https://build.pypa.io/en/stable/changelog.html)) | `1.2.1` -> `1.2.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/build/1.2.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/build/1.2.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/build/1.2.1/1.2.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/build/1.2.1/1.2.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [cachetools](https://redirect.github.com/tkem/cachetools) | `5.4.0` -> `5.5.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/cachetools/5.5.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/cachetools/5.5.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/cachetools/5.4.0/5.5.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/cachetools/5.4.0/5.5.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [certifi](https://redirect.github.com/certifi/python-certifi) | `2024.7.4` -> `2024.8.30` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/certifi/2024.8.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/certifi/2024.8.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/certifi/2024.7.4/2024.8.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/certifi/2024.7.4/2024.8.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [cffi](http://cffi.readthedocs.org) ([source](https://redirect.github.com/python-cffi/cffi), [changelog](https://cffi.readthedocs.io/en/latest/whatsnew.html)) | `1.16.0` -> `1.17.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/cffi/1.17.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/cffi/1.17.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/cffi/1.16.0/1.17.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/cffi/1.16.0/1.17.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [chroma-hnswlib](https://redirect.github.com/chroma-core/hnswlib) | `0.7.5` -> `0.7.6` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/chroma-hnswlib/0.7.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/chroma-hnswlib/0.7.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/chroma-hnswlib/0.7.5/0.7.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/chroma-hnswlib/0.7.5/0.7.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | 
| [chromadb](https://redirect.github.com/chroma-core/chroma) | `0.5.4` -> `0.5.11` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/chromadb/0.5.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/chromadb/0.5.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/chromadb/0.5.4/0.5.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/chromadb/0.5.4/0.5.11?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [contourpy](https://redirect.github.com/contourpy/contourpy) ([changelog](https://contourpy.readthedocs.io/en/latest/changelog.html)) | `1.2.1` -> `1.3.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/contourpy/1.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/contourpy/1.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/contourpy/1.2.1/1.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/contourpy/1.2.1/1.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [datasets](https://redirect.github.com/huggingface/datasets) | `2.20.0` -> `2.21.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/datasets/2.21.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/datasets/2.21.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/datasets/2.20.0/2.21.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/datasets/2.20.0/2.21.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [dill](https://redirect.github.com/uqfoundation/dill) | `0.3.8` -> `0.3.9` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/dill/0.3.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/dill/0.3.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/dill/0.3.8/0.3.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/dill/0.3.8/0.3.9?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [dulwich](https://redirect.github.com/dulwich/dulwich) | `0.21.7` -> `0.22.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/dulwich/0.22.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/dulwich/0.22.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/dulwich/0.21.7/0.22.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/dulwich/0.21.7/0.22.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [faker](https://redirect.github.com/joke2k/faker) ([changelog](https://redirect.github.com/joke2k/faker/blob/master/CHANGELOG.md)) | `30.0.0` -> `30.1.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/faker/30.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/faker/30.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/faker/30.0.0/30.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/faker/30.0.0/30.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [fastapi](https://redirect.github.com/fastapi/fastapi) ([changelog](https://fastapi.tiangolo.com/release-notes/)) | `0.111.1` -> `0.115.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/fastapi/0.115.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/fastapi/0.115.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/fastapi/0.111.1/0.115.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/fastapi/0.111.1/0.115.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [fastapi-cli](https://redirect.github.com/fastapi/fastapi-cli) | `0.0.4` -> `0.0.5` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/fastapi-cli/0.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/fastapi-cli/0.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/fastapi-cli/0.0.4/0.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/fastapi-cli/0.0.4/0.0.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [filelock](https://redirect.github.com/tox-dev/py-filelock) | `3.15.4` -> `3.16.1` | 
[![age](https://developer.mend.io/api/mc/badges/age/pypi/filelock/3.16.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/filelock/3.16.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/filelock/3.15.4/3.16.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/filelock/3.15.4/3.16.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [flowbite](https://flowbite.com) ([source](https://redirect.github.com/themesberg/flowbite)) | [`2.5.1` -> `2.5.2`](https://renovatebot.com/diffs/npm/flowbite/2.5.1/2.5.2) | [![age](https://developer.mend.io/api/mc/badges/age/npm/flowbite/2.5.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/flowbite/2.5.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/flowbite/2.5.1/2.5.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/flowbite/2.5.1/2.5.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [flowbite-svelte](https://flowbite-svelte.com/) ([source](https://redirect.github.com/themesberg/flowbite-svelte)) | [`0.46.16` -> `0.46.22`](https://renovatebot.com/diffs/npm/flowbite-svelte/0.46.16/0.46.22) | [![age](https://developer.mend.io/api/mc/badges/age/npm/flowbite-svelte/0.46.22?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/flowbite-svelte/0.46.22?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/flowbite-svelte/0.46.16/0.46.22?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/flowbite-svelte/0.46.16/0.46.22?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [fonttools](https://redirect.github.com/fonttools/fonttools) | `4.53.1` -> `4.54.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/fonttools/4.54.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/fonttools/4.54.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/fonttools/4.53.1/4.54.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/fonttools/4.53.1/4.54.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-api-core](https://redirect.github.com/googleapis/python-api-core) | `2.19.1` -> `2.20.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-api-core/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-api-core/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-api-core/2.19.1/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-api-core/2.19.1/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-api-core](https://redirect.github.com/googleapis/python-api-core) | `==2.19.2` -> `==2.20.0` | 
[![age](https://developer.mend.io/api/mc/badges/age/pypi/google-api-core/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-api-core/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-api-core/2.19.2/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-api-core/2.19.2/2.20.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [google-auth](https://redirect.github.com/googleapis/google-auth-library-python) | `2.32.0` -> `2.35.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-auth/2.35.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-auth/2.35.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-auth/2.32.0/2.35.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-auth/2.32.0/2.35.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-cloud-aiplatform](https://redirect.github.com/googleapis/python-aiplatform) | `1.59.0` -> `1.68.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-aiplatform/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-aiplatform/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-aiplatform/1.59.0/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-aiplatform/1.59.0/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-cloud-aiplatform](https://redirect.github.com/googleapis/python-aiplatform) | `==1.65.0` -> `==1.68.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-aiplatform/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-aiplatform/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-aiplatform/1.65.0/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-aiplatform/1.65.0/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [google-cloud-aiplatform](https://redirect.github.com/googleapis/python-aiplatform) | `==1.63.0` -> `==1.68.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-aiplatform/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-aiplatform/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-aiplatform/1.63.0/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-aiplatform/1.63.0/1.68.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [google-cloud-bigquery](https://redirect.github.com/googleapis/python-bigquery) | `3.25.0` -> `3.26.0` | 
[![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-bigquery/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-bigquery/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-bigquery/3.25.0/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-bigquery/3.25.0/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-cloud-bigquery](https://redirect.github.com/googleapis/python-bigquery) | `==3.25.0` -> `==3.26.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-bigquery/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-bigquery/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-bigquery/3.25.0/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-bigquery/3.25.0/3.26.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [google-cloud-documentai](https://redirect.github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-documentai) ([source](https://redirect.github.com/googleapis/google-cloud-python)) | `2.29.2` -> `2.32.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-documentai/2.32.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-documentai/2.32.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-documentai/2.29.2/2.32.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-documentai/2.29.2/2.32.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-cloud-pubsub](https://redirect.github.com/googleapis/python-pubsub) | `==2.23.0` -> `==2.25.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-pubsub/2.25.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-pubsub/2.25.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-pubsub/2.23.0/2.25.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-pubsub/2.23.0/2.25.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [google-cloud-resource-manager](https://redirect.github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-resource-manager) ([source](https://redirect.github.com/googleapis/google-cloud-python)) | `1.12.4` -> `1.12.5` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-resource-manager/1.12.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-resource-manager/1.12.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-resource-manager/1.12.4/1.12.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-resource-manager/1.12.4/1.12.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [google-cloud-secret-manager](https://redirect.github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-secret-manager) ([source](https://redirect.github.com/googleapis/google-cloud-python)) | `2.20.1` -> `2.20.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-secret-manager/2.20.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-secret-manager/2.20.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-secret-manager/2.20.1/2.20.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-secret-manager/2.20.1/2.20.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [google-cloud-storage](https://redirect.github.com/googleapis/python-storage) | `2.17.0` -> `2.18.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-cloud-storage/2.18.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-cloud-storage/2.18.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-cloud-storage/2.17.0/2.18.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-cloud-storage/2.17.0/2.18.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-crc32c](https://redirect.github.com/googleapis/python-crc32c) | `1.5.0` -> `1.6.0` | 
[![age](https://developer.mend.io/api/mc/badges/age/pypi/google-crc32c/1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-crc32c/1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-crc32c/1.5.0/1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-crc32c/1.5.0/1.6.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [google-resumable-media](https://redirect.github.com/googleapis/google-resumable-media-python) | `2.7.1` -> `2.7.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/google-resumable-media/2.7.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/google-resumable-media/2.7.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/google-resumable-media/2.7.1/2.7.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/google-resumable-media/2.7.1/2.7.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [googleapis-common-protos](https://redirect.github.com/googleapis/python-api-common-protos) | `1.63.2` -> `1.65.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/googleapis-common-protos/1.65.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/googleapis-common-protos/1.65.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/googleapis-common-protos/1.63.2/1.65.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/googleapis-common-protos/1.63.2/1.65.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [greenlet](https://greenlet.readthedocs.io/) | `3.0.3` -> `3.1.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/greenlet/3.1.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/greenlet/3.1.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/greenlet/3.0.3/3.1.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/greenlet/3.0.3/3.1.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [grpcio](https://grpc.io) ([source](https://redirect.github.com/grpc/grpc)) | `1.64.1` -> `1.66.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/grpcio/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/grpcio/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/grpcio/1.64.1/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/grpcio/1.64.1/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [grpcio-status](https://grpc.io) | `1.62.2` -> `1.66.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/grpcio-status/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/grpcio-status/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/grpcio-status/1.62.2/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/grpcio-status/1.62.2/1.66.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [httpx](https://redirect.github.com/encode/httpx) ([changelog](https://redirect.github.com/encode/httpx/blob/master/CHANGELOG.md)) | `0.27.0` -> `0.27.2` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/httpx/0.27.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/httpx/0.27.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/httpx/0.27.0/0.27.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/httpx/0.27.0/0.27.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [huggingface-hub](https://redirect.github.com/huggingface/huggingface_hub) | `0.23.4` -> `0.25.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/huggingface-hub/0.25.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/huggingface-hub/0.25.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/huggingface-hub/0.23.4/0.25.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/huggingface-hub/0.23.4/0.25.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor 
| | [idna](https://redirect.github.com/kjd/idna) ([changelog](https://redirect.github.com/kjd/idna/blob/master/HISTORY.rst)) | `3.7` -> `3.10` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/idna/3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/idna/3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/idna/3.7/3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/idna/3.7/3.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [importlib-metadata](https://redirect.github.com/python/importlib_metadata) | `7.1.0` -> `7.2.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/importlib-metadata/7.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/importlib-metadata/7.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/importlib-metadata/7.1.0/7.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/importlib-metadata/7.1.0/7.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [importlib-resources](https://redirect.github.com/python/importlib_resources) | `6.4.0` -> `6.4.5` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/importlib-resources/6.4.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/importlib-resources/6.4.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/importlib-resources/6.4.0/6.4.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/importlib-resources/6.4.0/6.4.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [kfp](https://redirect.github.com/kubeflow/pipelines) ([changelog](https://redirect.github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md)) | `==2.8.0` -> `==2.9.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/kfp/2.9.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/kfp/2.9.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/kfp/2.8.0/2.9.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/kfp/2.8.0/2.9.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [kiwisolver](https://redirect.github.com/nucleic/kiwi) ([changelog](https://redirect.github.com/nucleic/kiwi/blob/main/releasenotes.rst)) | `1.4.5` -> `1.4.7` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/kiwisolver/1.4.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/kiwisolver/1.4.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/kiwisolver/1.4.5/1.4.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/kiwisolver/1.4.5/1.4.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [langchain](https://redirect.github.com/langchain-ai/langchain) 
([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true)) | `0.2.8` -> `0.3.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain/0.2.8/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain/0.2.8/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [langchain](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true)) | `==0.2.16` -> `==0.3.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain/0.2.16/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain/0.2.16/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [langchain-community](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-community%3D%3D0%22&expanded=true)) | `0.2.9` -> `0.3.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-community/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-community/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-community/0.2.9/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-community/0.2.9/0.3.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [langchain-core](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true)) | `0.2.19` -> `0.3.7` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-core/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-core/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-core/0.2.19/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-core/0.2.19/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [langchain-core](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true)) | `==0.2.38` -> `==0.3.7` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-core/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-core/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-core/0.2.38/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-core/0.2.38/0.3.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [langchain-google-alloydb-pg](https://redirect.github.com/googleapis/langchain-google-alloydb-pg-python) ([changelog](https://redirect.github.com/googleapis/langchain-google-alloydb-pg-python/blob/main/CHANGELOG.md)) | `==0.6.0` -> `==0.7.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-google-alloydb-pg/0.7.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-google-alloydb-pg/0.7.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-google-alloydb-pg/0.6.0/0.7.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-google-alloydb-pg/0.6.0/0.7.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [langchain-google-vertexai](https://redirect.github.com/langchain-ai/langchain-google) | `1.0.6` -> `1.0.10` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-google-vertexai/1.0.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-google-vertexai/1.0.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-google-vertexai/1.0.6/1.0.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-google-vertexai/1.0.6/1.0.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [langchain-openai](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-openai%3D%3D0%22&expanded=true)) | `0.1.16` -> `0.2.1` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-openai/0.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-openai/0.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-openai/0.1.16/0.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-openai/0.1.16/0.2.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [langchain-text-splitters](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-text-splitters%3D%3D0%22&expanded=true)) | `0.2.2` -> `0.3.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-text-splitters/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-text-splitters/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-text-splitters/0.2.2/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-text-splitters/0.2.2/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | 
[langchain-text-splitters](https://redirect.github.com/langchain-ai/langchain) ([changelog](https://redirect.github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-text-splitters%3D%3D0%22&expanded=true)) | `==0.2.4` -> `==0.3.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langchain-text-splitters/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langchain-text-splitters/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langchain-text-splitters/0.2.4/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langchain-text-splitters/0.2.4/0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | minor | | [langsmith](https://smith.langchain.com/) ([source](https://redirect.github.com/langchain-ai/langsmith-sdk)) | `0.1.85` -> `0.1.129` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/langsmith/0.1.129?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/langsmith/0.1.129?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/langsmith/0.1.85/0.1.129?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/langsmith/0.1.85/0.1.129?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | llama-cloud | `0.0.11` -> `0.1.0` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/llama-cloud/0.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/llama-cloud/0.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/llama-cloud/0.0.11/0.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/llama-cloud/0.0.11/0.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [llama-index](https://llamaindex.ai) ([source](https://redirect.github.com/run-llama/llama_index)) | `0.10.58` -> `0.11.14` | [![age](https://developer.mend.io/api/mc/badges/age/pypi/llama-index/0.11.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/pypi/llama-index/0.11.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/pypi/llama-index/0.10.58/0.11.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/pypi/llama-index/0.10.58/0.11.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | depe Co-authored-by: Holt Skinner --- .github/actions/spelling/allow.txt | 2 + conversation/chat-app/package-lock.json | 161 ++++++----- .../src/requirements.txt | 10 +- .../function_calling_service/requirements.txt | 2 +- .../sql-talk-app/requirements.txt | 4 +- gemini/sample-apps/fixmycar/backend/pom.xml | 10 +- gemini/sample-apps/llamaindex-rag/Dockerfile | 2 +- .../sample-apps/llamaindex-rag/pyproject.toml | 262 +++++++++--------- .../sample-apps/llamaindex-rag/ui/Dockerfile | 2 +- .../ag-web/app/requirements.txt | 2 +- search/cloud-function/python/requirements.txt | 4 +- 11 files changed, 247 insertions(+), 214 deletions(-) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 852e78d42c..c5eb6753e6 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -637,6 +637,7 @@ usecases username usernames uvb +uvicorn vapo vectordb 
vertexai @@ -663,6 +664,7 @@ xcodeproj xcscheme xctest xlabel +xsi xticks xxxxxxxx yaxes diff --git a/conversation/chat-app/package-lock.json b/conversation/chat-app/package-lock.json index c36f273ab0..8a572105d5 100644 --- a/conversation/chat-app/package-lock.json +++ b/conversation/chat-app/package-lock.json @@ -433,19 +433,19 @@ } }, "node_modules/@floating-ui/dom": { - "version": "1.6.10", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.10.tgz", - "integrity": "sha512-fskgCFv8J8OamCmyun8MfjB1Olfn+uZKjOKZ0vhYF3gRmEUXcGOjxWL8bBr7i4kIuPZ2KD2S3EUIOxnjC8kl2A==", + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.11.tgz", + "integrity": "sha512-qkMCxSR24v2vGkhYDo/UzxfJN3D4syqSjyuTFz6C7XcpU1pASPRieNI0Kj5VP3/503mOfYiGY891ugBX1GlABQ==", "license": "MIT", "dependencies": { "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.7" + "@floating-ui/utils": "^0.2.8" } }, "node_modules/@floating-ui/utils": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.7.tgz", - "integrity": "sha512-X8R8Oj771YRl/w+c1HqAC1szL8zWQRwFvgDwT129k9ACdBoud/+/rX9V0qiMl6LWUdP9voC2nDVZYPMQQsb6eA==", + "version": "0.2.8", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz", + "integrity": "sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==", "license": "MIT" }, "node_modules/@jridgewell/gen-mapping": { @@ -804,9 +804,9 @@ ] }, "node_modules/@sveltejs/adapter-auto": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@sveltejs/adapter-auto/-/adapter-auto-3.2.4.tgz", - "integrity": "sha512-a64AKYbfTUrVwU0xslzv1Jf3M8bj0IwhptaXmhgIkjXspBXhD0od9JiItQHchijpLMGdEDcYBlvqySkEawv6mQ==", + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/@sveltejs/adapter-auto/-/adapter-auto-3.2.5.tgz", + "integrity": "sha512-27LR+uKccZ62lgq4N/hvyU2G+hTP9fxWEAfnZcl70HnyfAjMSsGk1z/SjAPXNCD1mVJIE7IFu3TQ8cQ/UH3c0A==", 
"dev": true, "license": "MIT", "dependencies": { @@ -817,9 +817,9 @@ } }, "node_modules/@sveltejs/adapter-static": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@sveltejs/adapter-static/-/adapter-static-3.0.4.tgz", - "integrity": "sha512-Qm4GAHCnRXwfWG9/AtnQ7mqjyjTs7i0Opyb8H2KH9rMR7fLxqiPx/tXeoE6HHo66+72CjyOb4nFH3lrejY4vzA==", + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@sveltejs/adapter-static/-/adapter-static-3.0.5.tgz", + "integrity": "sha512-kFJR7RxeB6FBvrKZWAEzIALatgy11ISaaZbcPup8JdWUdrmmfUHHTJ738YHJTEfnCiiXi6aX8Q6ePY7tnSMD6Q==", "dev": true, "license": "MIT", "peerDependencies": { @@ -827,16 +827,16 @@ } }, "node_modules/@sveltejs/kit": { - "version": "2.5.26", - "resolved": "https://registry.npmjs.org/@sveltejs/kit/-/kit-2.5.26.tgz", - "integrity": "sha512-8l1JTIM2L+bS8ebq1E+nGjv/YSKSnD9Q19bYIUkc41vaEG2JjVUx6ikvPIJv2hkQAuqJLzoPrXlKk4KcyWOv3Q==", + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@sveltejs/kit/-/kit-2.6.1.tgz", + "integrity": "sha512-QFlch3GPGZYidYhdRAub0fONw8UTguPICFHUSPxNkA/jdlU1p6C6yqq19J1QWdxIHS2El/ycDCGrHb3EAiMNqg==", "dev": true, "hasInstallScript": true, "license": "MIT", "dependencies": { "@types/cookie": "^0.6.0", "cookie": "^0.6.0", - "devalue": "^5.0.0", + "devalue": "^5.1.0", "esm-env": "^1.0.0", "import-meta-resolve": "^4.1.0", "kleur": "^4.1.5", @@ -954,9 +954,9 @@ } }, "node_modules/apexcharts": { - "version": "3.53.0", - "resolved": "https://registry.npmjs.org/apexcharts/-/apexcharts-3.53.0.tgz", - "integrity": "sha512-QESZHZY3w9LPQ64PGh1gEdfjYjJ5Jp+Dfy0D/CLjsLOPTpXzdxwlNMqRj+vPbTcP0nAHgjWv1maDqcEq6u5olw==", + "version": "3.54.0", + "resolved": "https://registry.npmjs.org/apexcharts/-/apexcharts-3.54.0.tgz", + "integrity": "sha512-ZgI/seScffjLpwNRX/gAhIkAhpCNWiTNsdICv7qxnF0xisI23XSsaENUKIcMlyP1rbe8ECgvybDnp7plZld89A==", "license": "MIT", "dependencies": { "@yr/monotone-cubic-spline": "^1.0.3", @@ -1275,9 +1275,9 @@ } }, "node_modules/devalue": { - "version": "5.0.0", - 
"resolved": "https://registry.npmjs.org/devalue/-/devalue-5.0.0.tgz", - "integrity": "sha512-gO+/OMXF7488D+u3ue+G7Y4AA3ZmUnB3eHJXmBTgNHvr4ZNzl36A0ZtG+XCRNYCkYx/bFmw4qtkoFLa+wSrwAA==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/devalue/-/devalue-5.1.1.tgz", + "integrity": "sha512-maua5KUiapvEwiEAe+XnlZ3Rh0GD+qI1J/nb9vrJc3muPXvcF/8gXYTWF76+5DAqHyDUtOIImEuo0YKE9mshVw==", "dev": true, "license": "MIT" }, @@ -1412,9 +1412,9 @@ } }, "node_modules/flowbite": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/flowbite/-/flowbite-2.5.1.tgz", - "integrity": "sha512-7jP1jy9c3QP7y+KU9lc8ueMkTyUdMDvRP+lteSWgY5TigSZjf9K1kqZxmqjhbx2gBnFQxMl1GAjVThCa8cEpKA==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/flowbite/-/flowbite-2.5.2.tgz", + "integrity": "sha512-kwFD3n8/YW4EG8GlY3Od9IoKND97kitO+/ejISHSqpn3vw2i5K/+ZI8Jm2V+KC4fGdnfi0XZ+TzYqQb4Q1LshA==", "license": "MIT", "dependencies": { "@popperjs/core": "^2.9.3", @@ -1433,14 +1433,14 @@ } }, "node_modules/flowbite-svelte": { - "version": "0.46.16", - "resolved": "https://registry.npmjs.org/flowbite-svelte/-/flowbite-svelte-0.46.16.tgz", - "integrity": "sha512-NkyMS/d1EwuL1cqstSUflnG9vhhBiNyUiAw51D8lfPKDfUG1iXc4+HueQw01zhHv3uSXRJRToFBrg6npxeJ3jw==", + "version": "0.46.22", + "resolved": "https://registry.npmjs.org/flowbite-svelte/-/flowbite-svelte-0.46.22.tgz", + "integrity": "sha512-U+YeJ3ye1OV/9d9VGff/0yuWxkv2Cdk71bvy5JJszIjfEfRvEzX5Ovjm6SHYp51B6hLwEdmMFJXPo5bTmEMGkA==", "license": "MIT", "dependencies": { - "@floating-ui/dom": "^1.6.10", - "apexcharts": "^3.53.0", - "flowbite": "^2.5.1", + "@floating-ui/dom": "^1.6.11", + "apexcharts": "^3.54.0", + "flowbite": "^2.5.2", "tailwind-merge": "^2.5.2" }, "engines": { @@ -1861,9 +1861,9 @@ } }, "node_modules/picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", 
+ "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", "dev": true, "license": "ISC" }, @@ -1897,9 +1897,9 @@ } }, "node_modules/postcss": { - "version": "8.4.45", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz", - "integrity": "sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q==", + "version": "8.4.47", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", + "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", "dev": true, "funding": [ { @@ -1918,8 +1918,8 @@ "license": "MIT", "dependencies": { "nanoid": "^3.3.7", - "picocolors": "^1.0.1", - "source-map-js": "^1.2.0" + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -2072,9 +2072,9 @@ } }, "node_modules/prettier-plugin-svelte": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/prettier-plugin-svelte/-/prettier-plugin-svelte-3.2.6.tgz", - "integrity": "sha512-Y1XWLw7vXUQQZmgv1JAEiLcErqUniAF2wO7QJsw8BVMvpLET2dI5WpEIEJx1r11iHVdSMzQxivyfrH9On9t2IQ==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/prettier-plugin-svelte/-/prettier-plugin-svelte-3.2.7.tgz", + "integrity": "sha512-/Dswx/ea0lV34If1eDcG3nulQ63YNr5KPDfMsjbdtpSWOxKKJ7nAc2qlVuYwEvCr4raIuredNoR7K4JCkmTGaQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -2083,9 +2083,9 @@ } }, "node_modules/prettier-plugin-tailwindcss": { - "version": "0.6.6", - "resolved": "https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.6.6.tgz", - "integrity": "sha512-OPva5S7WAsPLEsOuOWXATi13QrCKACCiIonFgIR6V4lYv4QLp++UXVhZSzRbZxXGimkQtQT86CC6fQqTOybGng==", + "version": "0.6.8", + "resolved": 
"https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.6.8.tgz", + "integrity": "sha512-dGu3kdm7SXPkiW4nzeWKCl3uoImdd5CTZEJGxyypEPL37Wj0HT2pLqjrvSei1nTeuQfO4PUfjeW5cTUNRLZ4sA==", "dev": true, "license": "MIT", "engines": { @@ -2319,9 +2319,10 @@ } }, "node_modules/source-map-js": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", - "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } @@ -2405,14 +2406,14 @@ } }, "node_modules/svelte-check": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/svelte-check/-/svelte-check-4.0.1.tgz", - "integrity": "sha512-AuWnCZdREoOzMhoptHPUUPYUxLNdXSkoZnPnlv19SZJJimRzLmjjZLKsOiRB4AnhgX+56/WSEdvkWXI/q2BSsA==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/svelte-check/-/svelte-check-4.0.4.tgz", + "integrity": "sha512-AcHWIPuZb1mh/jKoIrww0ebBPpAvwWd1bfXCnwC2dx4OkydNMaiG//+Xnry91RJMHFH7CiE+6Y2p332DRIaOXQ==", "dev": true, "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "^0.3.25", - "chokidar": "^3.4.1", + "chokidar": "^4.0.1", "fdir": "^6.2.0", "picocolors": "^1.0.0", "sade": "^1.7.4" @@ -2428,10 +2429,26 @@ "typescript": ">=5.0.0" } }, + "node_modules/svelte-check/node_modules/chokidar": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.1.tgz", + "integrity": "sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": 
"https://paulmillr.com/funding/" + } + }, "node_modules/svelte-check/node_modules/fdir": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.3.0.tgz", - "integrity": "sha512-QOnuT+BOtivR77wYvCWHfGt9s4Pz1VIMbD463vegT5MLqNXy8rYFT/lPVEqf/bhYeT6qmqrNHhsX+rWwe3rOCQ==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.0.tgz", + "integrity": "sha512-3oB133prH1o4j/L5lLW7uOCF1PlD+/It2L0eL/iAqWMB91RBbqTewABqxhj0ibBd90EEmWZq7ntIWzVaWcXTGQ==", "dev": true, "license": "MIT", "peerDependencies": { @@ -2458,6 +2475,20 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/svelte-check/node_modules/readdirp": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.0.1.tgz", + "integrity": "sha512-GkMg9uOTpIWWKbSsgwb5fA4EavTR+SG/PMPoAY8hkhHfEEY0/vqljY+XHqtDf2cr2IJtoNRDbrrEpZUiZCkYRw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, "node_modules/svelte-hmr": { "version": "0.15.3", "resolved": "https://registry.npmjs.org/svelte-hmr/-/svelte-hmr-0.15.3.tgz", @@ -2573,9 +2604,9 @@ } }, "node_modules/tailwindcss": { - "version": "3.4.10", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.10.tgz", - "integrity": "sha512-KWZkVPm7yJRhdu4SRSl9d4AK2wM3a50UsvgHZO7xY77NQr2V+fIrEuoDGQcbvswWvFGbS2f6e+jC/6WJm1Dl0w==", + "version": "3.4.13", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.13.tgz", + "integrity": "sha512-KqjHOJKogOUt5Bs752ykCeiwvi0fKVkr5oqsFNt/8px/tA8scFPIlkygsf6jXrfCqGHz7VflA6+yytWuM+XhFw==", "dev": true, "license": "MIT", "dependencies": { @@ -2725,9 +2756,9 @@ "license": "0BSD" }, "node_modules/typescript": { - "version": "5.5.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.4.tgz", - "integrity": 
"sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==", + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz", + "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==", "dev": true, "license": "Apache-2.0", "bin": { @@ -2776,9 +2807,9 @@ "dev": true }, "node_modules/vite": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.6.tgz", - "integrity": "sha512-IeL5f8OO5nylsgzd9tq4qD2QqI0k2CQLGrWD0rCN0EQJZpBK5vJAx0I+GDkMOXxQX/OfFHMuLIx6ddAxGX/k+Q==", + "version": "5.4.8", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.8.tgz", + "integrity": "sha512-FqrItQ4DT1NC4zCUqMB4c4AZORMKIa0m8/URVCZ77OZ/QSNeJ54bU1vrFADbDsuwfIPcgknRkmqakQcgnL4GiQ==", "dev": true, "license": "MIT", "dependencies": { diff --git a/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/requirements.txt b/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/requirements.txt index c106ddd766..883edaba07 100644 --- a/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/requirements.txt +++ b/gemini/evaluation/legacy/champion_challenger_eval_pipeline/src/requirements.txt @@ -1,7 +1,7 @@ -kfp==2.8.0 -pytest==8.3.2 -pandas==2.2.2 +kfp==2.9.0 +pytest==8.3.3 +pandas==2.2.3 google-cloud-core==2.4.1 google-cloud-storage==2.18.2 -google-cloud-bigquery==3.25.0 -google-cloud-aiplatform==1.63.0 \ No newline at end of file +google-cloud-bigquery==3.26.0 +google-cloud-aiplatform==1.68.0 \ No newline at end of file diff --git a/gemini/function-calling/function_calling_service/requirements.txt b/gemini/function-calling/function_calling_service/requirements.txt index a17dbf5a4f..0b14077503 100644 --- a/gemini/function-calling/function_calling_service/requirements.txt +++ b/gemini/function-calling/function_calling_service/requirements.txt @@ -1,4 +1,4 @@ -google-cloud-aiplatform==1.65.0 
+google-cloud-aiplatform==1.68.0 Flask==3.0.3 gunicorn==23.0.0 Werkzeug==3.0.4 diff --git a/gemini/function-calling/sql-talk-app/requirements.txt b/gemini/function-calling/sql-talk-app/requirements.txt index 74e6c1c74d..494d76fe9a 100644 --- a/gemini/function-calling/sql-talk-app/requirements.txt +++ b/gemini/function-calling/sql-talk-app/requirements.txt @@ -1,3 +1,3 @@ -google-cloud-aiplatform==1.65.0 -google-cloud-bigquery==3.25.0 +google-cloud-aiplatform==1.68.0 +google-cloud-bigquery==3.26.0 streamlit==1.38.0 diff --git a/gemini/sample-apps/fixmycar/backend/pom.xml b/gemini/sample-apps/fixmycar/backend/pom.xml index 0d81c05829..12d5eb674d 100644 --- a/gemini/sample-apps/fixmycar/backend/pom.xml +++ b/gemini/sample-apps/fixmycar/backend/pom.xml @@ -6,7 +6,7 @@ org.springframework.boot spring-boot-starter-parent - 3.3.3 + 3.3.4 com.cpet @@ -24,7 +24,7 @@ com.google.cloud libraries-bom - 26.45.0 + 26.47.0 pom import @@ -76,17 +76,17 @@ org.apache.logging.log4j log4j-api - 2.24.0 + 2.24.1 org.apache.logging.log4j log4j-core - 2.24.0 + 2.24.1 org.apache.logging.log4j log4j-slf4j-impl - 2.24.0 + 2.24.1 diff --git a/gemini/sample-apps/llamaindex-rag/Dockerfile b/gemini/sample-apps/llamaindex-rag/Dockerfile index c776cc6b35..cb11d4bd3f 100644 --- a/gemini/sample-apps/llamaindex-rag/Dockerfile +++ b/gemini/sample-apps/llamaindex-rag/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.10-slim +FROM python:3.12-slim WORKDIR /app diff --git a/gemini/sample-apps/llamaindex-rag/pyproject.toml b/gemini/sample-apps/llamaindex-rag/pyproject.toml index 9d6d3e007b..f2d869acc7 100644 --- a/gemini/sample-apps/llamaindex-rag/pyproject.toml +++ b/gemini/sample-apps/llamaindex-rag/pyproject.toml @@ -15,88 +15,88 @@ package-mode = false [tool.poetry.dependencies] python = "^3.10" -pyyaml = "6.0.1" -llama-index = "0.10.58" -aiohttp = "3.10.5" +pyyaml = "6.0.2" +llama-index = "0.11.14" +aiohttp = "3.10.8" aiosignal = "1.3.1" -altair = "5.3.0" +altair = "5.4.1" annotated-types = "0.7.0" 
-antlr4-python3-runtime = "4.9.3" -anyio = "4.4.0" +antlr4-python3-runtime = "4.13.2" +anyio = "4.6.0" appdirs = "1.4.4" asgiref = "3.8.1" async-timeout = "4.0.3" attrs = "24.2.0" backoff = "2.2.1" -bcrypt = "4.1.3" +bcrypt = "4.2.0" beautifulsoup4 = "4.12.3" blinker = "1.8.2" -build = "1.2.1" +build = "1.2.2" cachecontrol = "0.14.0" -cachetools = "5.4.0" -certifi = "2024.7.4" -cffi = "1.16.0" +cachetools = "5.5.0" +certifi = "2024.8.30" +cffi = "1.17.1" charset-normalizer = "3.3.2" -chroma-hnswlib = "0.7.5" -chromadb = "0.5.4" +chroma-hnswlib = "0.7.6" +chromadb = "0.5.11" cleo = "2.1.0" click = "8.1.7" coloredlogs = "15.0.1" -contourpy = "1.2.1" +contourpy = "1.3.0" crashtest = "0.4.1" cryptography = "43.0.1" cycler = "0.12.1" dataclasses-json = "0.6.7" -datasets = "2.20.0" +datasets = "2.21.0" deprecated = "1.2.14" -dill = "0.3.8" +dill = "0.3.9" dirtyjson = "1.0.8" distlib = "0.3.8" distro = "1.9.0" dnspython = "2.6.1" docstring-parser = "0.16" -dulwich = "0.21.7" +dulwich = "0.22.1" email-validator = "2.2.0" entrypoints = "0.4" exceptiongroup = "1.2.2" -faker = "30.0.0" -fastapi = "0.111.1" -fastapi-cli = "0.0.4" +faker = "30.1.0" +fastapi = "0.115.0" +fastapi-cli = "0.0.5" fastjsonschema = "2.20.0" favicon = "0.7.0" -filelock = "3.15.4" +filelock = "3.16.1" flatbuffers = "24.3.25" -fonttools = "4.53.1" +fonttools = "4.54.1" frozenlist = "1.4.1" gitdb = "4.0.11" gitpython = "3.1.43" -google-api-core = "2.19.1" -google-auth = "2.32.0" -google-cloud-aiplatform = "1.59.0" -google-cloud-bigquery = "3.25.0" +google-api-core = "2.20.0" +google-auth = "2.35.0" +google-cloud-aiplatform = "1.68.0" +google-cloud-bigquery = "3.26.0" google-cloud-core = "2.4.1" -google-cloud-documentai = "2.29.2" -google-cloud-resource-manager = "1.12.4" -google-cloud-secret-manager = "2.20.1" -google-cloud-storage = "2.17.0" -google-crc32c = "1.5.0" -google-resumable-media = "2.7.1" -googleapis-common-protos = "1.63.2" -greenlet = "3.0.3" +google-cloud-documentai = "2.32.0" 
+google-cloud-resource-manager = "1.12.5" +google-cloud-secret-manager = "2.20.2" +google-cloud-storage = "2.18.2" +google-crc32c = "1.6.0" +google-resumable-media = "2.7.2" +googleapis-common-protos = "1.65.0" +greenlet = "3.1.1" grpc-google-iam-v1 = "0.13.1" -grpcio = "1.64.1" -grpcio-status = "1.62.2" +grpcio = "1.66.2" +grpcio-status = "1.66.2" h11 = "0.14.0" htbuilder = "0.6.2" httpcore = "1.0.5" httptools = "0.6.1" -httpx = "0.27.0" -huggingface-hub = "0.23.4" +httpx = "0.27.2" +huggingface-hub = "0.25.1" humanfriendly = "10.0" hydra-core = "1.3.2" -idna = "3.7" -importlib-metadata = "7.1.0" -importlib-resources = "6.4.0" +idna = "3.10" +importlib-metadata = "7.2.1" +importlib-resources = "6.4.5" iniconfig = "2.0.0" installer = "0.7.0" jaraco-classes = "3.4.0" @@ -107,50 +107,50 @@ jsonpatch = "1.33" jsonpointer = "3.0.0" jsonschema = "4.23.0" keyring = "24.3.1" -kiwisolver = "1.4.5" +kiwisolver = "1.4.7" kubernetes = "31.0.0" -langchain = "0.2.8" -langchain-community = "0.2.9" -langchain-core = "0.2.19" -langchain-google-vertexai = "1.0.6" -langchain-openai = "0.1.16" -langchain-text-splitters = "0.2.2" -langsmith = "0.1.85" -llama-cloud = "0.0.11" -llama-index-agent-openai = "0.2.9" -llama-index-cli = "0.1.13" -llama-index-core = "0.10.58" -llama-index-embeddings-openai = "0.1.11" -llama-index-embeddings-vertex = "0.1.0" -llama-index-indices-managed-llama-cloud = "0.2.6" -llama-index-legacy = "0.9.48" -llama-index-llms-openai = "0.1.27" -llama-index-llms-vertex = "0.2.2" -llama-index-multi-modal-llms-openai = "0.1.8" -llama-index-program-openai = "0.1.7" -llama-index-question-gen-openai = "0.1.3" -llama-index-readers-file = "0.1.30" -llama-index-readers-llama-parse = "0.1.6" -llama-index-vector-stores-chroma = "0.1.10" -llama-index-vector-stores-vertexaivectorsearch = "0.0.1" -llama-parse = "0.4.9" -lxml = "5.2.2" -markdown = "3.6" +langchain = "0.3.1" +langchain-community = "0.3.1" +langchain-core = "0.3.7" +langchain-google-vertexai = "1.0.10" 
+langchain-openai = "0.2.1" +langchain-text-splitters = "0.3.0" +langsmith = "0.1.129" +llama-cloud = "0.1.0" +llama-index-agent-openai = "0.3.4" +llama-index-cli = "0.3.1" +llama-index-core = "0.11.14" +llama-index-embeddings-openai = "0.2.5" +llama-index-embeddings-vertex = "0.2.1" +llama-index-indices-managed-llama-cloud = "0.4.0" +llama-index-legacy = "0.9.48.post3" +llama-index-llms-openai = "0.2.9" +llama-index-llms-vertex = "0.3.6" +llama-index-multi-modal-llms-openai = "0.2.1" +llama-index-program-openai = "0.2.0" +llama-index-question-gen-openai = "0.2.0" +llama-index-readers-file = "0.2.2" +llama-index-readers-llama-parse = "0.3.0" +llama-index-vector-stores-chroma = "0.2.0" +llama-index-vector-stores-vertexaivectorsearch = "0.1.0" +llama-parse = "0.5.6" +lxml = "5.3.0" +markdown = "3.7" markdown-it-py = "3.0.0" markdownify = "0.13.1" markdownlit = "0.0.7" marko = "2.1.2" markupsafe = "2.1.5" -marshmallow = "3.21.3" -matplotlib = "3.9.1" +marshmallow = "3.22.0" +matplotlib = "3.9.2" mdurl = "0.1.2" mmh3 = "4.1.0" monotonic = "1.6" -more-itertools = "10.3.0" +more-itertools = "10.5.0" mpmath = "1.3.0" -msgpack = "1.0.8" -multidict = "6.0.5" -multiprocess = "0.70.16" +msgpack = "1.1.0" +multidict = "6.1.0" +multiprocess = "0.70.17" mypy-extensions = "1.0.0" nest-asyncio = "1.6.0" networkx = "3.3" @@ -158,122 +158,122 @@ nltk = "3.9.1" numpy = "1.26.4" oauthlib = "3.2.2" omegaconf = "2.3.0" -onnxruntime = "1.18.1" -openai = "1.37.0" -opentelemetry-api = "1.25.0" -opentelemetry-exporter-otlp-proto-common = "1.25.0" -opentelemetry-exporter-otlp-proto-grpc = "1.25.0" +onnxruntime = "1.19.2" +openai = "1.50.2" +opentelemetry-api = "1.27.0" +opentelemetry-exporter-otlp-proto-common = "1.27.0" +opentelemetry-exporter-otlp-proto-grpc = "1.27.0" opentelemetry-instrumentation = "0.46b0" opentelemetry-instrumentation-asgi = "0.46b0" opentelemetry-instrumentation-fastapi = "0.46b0" -opentelemetry-proto = "1.25.0" -opentelemetry-sdk = "1.25.0" +opentelemetry-proto = 
"1.27.0" +opentelemetry-sdk = "1.27.0" opentelemetry-semantic-conventions = "0.46b0" opentelemetry-util-http = "0.46b0" -orjson = "3.10.6" +orjson = "3.10.7" overrides = "7.7.0" packaging = "24.1" -pandas = "2.2.2" +pandas = "2.2.3" pdfminer-six = "20240706" pexpect = "4.9.0" pillow = "10.4.0" pkginfo = "1.11.1" -platformdirs = "4.2.2" +platformdirs = "4.3.6" pluggy = "1.5.0" poetry-core = "1.9.0" poetry-plugin-export = "1.8.0" -posthog = "3.5.0" -prometheus-client = "0.20.0" +posthog = "3.6.6" +prometheus-client = "0.21.0" proto-plus = "1.24.0" -protobuf = "4.25.3" +protobuf = "4.25.5" ptyprocess = "0.7.0" pyarrow = "15.0.2" pyarrow-hotfix = "0.6" -pyasn1 = "0.6.0" -pyasn1-modules = "0.4.0" +pyasn1 = "0.6.1" +pyasn1-modules = "0.4.1" pycparser = "2.22" -pydantic = "2.8.2" -pydantic-core = "2.20.1" +pydantic = "2.9.2" +pydantic-core = "2.24.0" pydeck = "0.9.1" pygments = "2.18.0" -pymdown-extensions = "10.8.1" -pyparsing = "3.1.2" +pymdown-extensions = "10.11.1" +pyparsing = "3.1.4" pypdf = "4.3.1" pypika = "0.48.9" -pyproject-hooks = "1.1.0" +pyproject-hooks = "1.2.0" pysbd = "0.3.4" -pytest = "8.2.2" +pytest = "8.3.3" python-dateutil = "2.9.0.post0" python-dotenv = "1.0.1" -python-multipart = "0.0.9" -pytz = "2024.1" -ragas = "0.1.9" -rapidfuzz = "3.9.4" +python-multipart = "0.0.12" +pytz = "2024.2" +ragas = "0.1.20" +rapidfuzz = "3.10.0" referencing = "0.35.1" -regex = "2024.7.24" +regex = "2024.9.11" requests = "2.32.3" requests-oauthlib = "2.0.0" requests-toolbelt = "1.0.0" -rich = "13.7.1" -rpds-py = "0.19.0" +rich = "13.8.1" +rpds-py = "0.20.0" rsa = "4.9" secretstorage = "3.3.3" -shapely = "2.0.5" +shapely = "2.0.6" shellingham = "1.5.4" six = "1.16.0" smmap = "5.0.1" sniffio = "1.3.1" -soupsieve = "2.5" -sqlalchemy = "2.0.31" +soupsieve = "2.6" +sqlalchemy = "2.0.35" st-annotated-text = "4.0.1" st-theme = "1.2.3" -starlette = "0.37.2" +starlette = "0.39.2" streamlit = "1.38.0" streamlit-camera-input-live = "0.2.0" streamlit-card = "1.0.2" 
streamlit-embedcode = "0.1.2" -streamlit-extras = "0.4.3" +streamlit-extras = "0.4.7" streamlit-faker = "0.0.3" streamlit-image-coordinates = "0.1.9" streamlit-keyup = "0.2.4" streamlit-toggle-switch = "1.0.2" streamlit-vertical-slider = "2.5.5" striprtf = "0.0.26" -sympy = "1.13.0" +sympy = "1.13.3" tenacity = "8.5.0" tiktoken = "0.7.0" -tokenizers = "0.19.1" +tokenizers = "0.20.0" toml = "0.10.2" tomli = "2.0.1" -tomlkit = "0.13.0" +tomlkit = "0.13.2" toolz = "0.12.1" tornado = "6.4.1" -tqdm = "4.66.4" -trove-classifiers = "2024.7.2" -typer = "0.12.3" +tqdm = "4.66.5" +trove-classifiers = "2024.9.12" +typer = "0.12.5" typing-inspect = "0.9.0" typing-extensions = "4.12.2" -tzdata = "2024.1" -urllib3 = "2.2.2" -uvicorn = "0.30.1" -uvloop = "0.19.0" -validators = "0.33.0" -virtualenv = "20.26.3" -watchdog = "4.0.1" -watchfiles = "0.22.0" +tzdata = "2024.2" +urllib3 = "2.2.3" +uvicorn = "0.31.0" +uvloop = "0.20.0" +validators = "0.34.0" +virtualenv = "20.26.6" +watchdog = "4.0.2" +watchfiles = "0.24.0" websocket-client = "1.8.0" websockets = "12.0" wrapt = "1.16.0" -xxhash = "3.4.1" -yarl = "1.9.4" -zipp = "3.19.2" -llama-index-storage-docstore-firestore = "^0.1.3" -llama-index-storage-index-store-firestore = "^0.1.3" -llama-index-retrievers-vertexai-search = "^0.0.1" -llama-index-retrievers-bm25 = "^0.2.2" +xxhash = "3.5.0" +yarl = "1.13.1" +zipp = "3.20.2" +llama-index-storage-docstore-firestore = "^0.2.0" +llama-index-storage-index-store-firestore = "^0.3.0" +llama-index-retrievers-vertexai-search = "^0.1.0" +llama-index-retrievers-bm25 = "^0.3.0" google-cloud-logging = "^3.11.0" -anthropic = {extras = ["vertex"], version = "^0.31.2"} -weave = "^0.50.12" +anthropic = {extras = ["vertex"], version = "^0.34.0"} +weave = "^0.51.0" black = "^24.8.0" flake8 = "^7.1.1" autopep8 = "^2.3.1" diff --git a/gemini/sample-apps/llamaindex-rag/ui/Dockerfile b/gemini/sample-apps/llamaindex-rag/ui/Dockerfile index 812c96d39a..3ad5e08436 100644 --- 
a/gemini/sample-apps/llamaindex-rag/ui/Dockerfile +++ b/gemini/sample-apps/llamaindex-rag/ui/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.10-slim +FROM python:3.12-slim WORKDIR /app diff --git a/gemini/sample-apps/photo-discovery/ag-web/app/requirements.txt b/gemini/sample-apps/photo-discovery/ag-web/app/requirements.txt index 0f0a67ea10..02e6247a57 100644 --- a/gemini/sample-apps/photo-discovery/ag-web/app/requirements.txt +++ b/gemini/sample-apps/photo-discovery/ag-web/app/requirements.txt @@ -2,5 +2,5 @@ Flask==3.0.3 flask-cors gunicorn==23.0.0 Werkzeug==3.0.4 -google-cloud-aiplatform[langchain,reasoningengine]==1.65.0 +google-cloud-aiplatform[langchain,reasoningengine]==1.68.0 google-cloud-discoveryengine==0.12.2 diff --git a/search/cloud-function/python/requirements.txt b/search/cloud-function/python/requirements.txt index e15fbcadc5..344d60e4d1 100644 --- a/search/cloud-function/python/requirements.txt +++ b/search/cloud-function/python/requirements.txt @@ -2,6 +2,6 @@ Flask==3.0.3 functions_framework==3.8.1 google-cloud-discoveryengine>=0.11 mypy==1.11.2 -protobuf==5.28.0 -pytest==8.3.2 +protobuf==5.28.2 +pytest==8.3.3 python-dotenv==1.0.1 \ No newline at end of file From 6bcc1f1e38e57111ebad3bf678146b9c9cafa617 Mon Sep 17 00:00:00 2001 From: Alok Pattani <51244947+alokpattani@users.noreply.github.com> Date: Thu, 3 Oct 2024 14:11:35 -0700 Subject: [PATCH 40/76] fix: Update youtube_video_analysis.ipynb (#1163) --------- Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Co-authored-by: Holt Skinner --- .github/actions/spelling/allow.txt | 47 +++++++++++++++++++ .../youtube_video_analysis.ipynb | 31 +++--------- 2 files changed, 53 insertions(+), 25 deletions(-) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index c5eb6753e6..23cd4e5291 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -10,6 +10,7 @@ Akihiro Aniston Arborio Arepa +Arjen Arsan Ashish Aspeed @@ 
-17,9 +18,11 @@ Autechre Autorater BIKBEAR BITCODE +Beckham Benno Bettes Biden +Bigtable Bitcoin Borthwick Boyz @@ -33,6 +36,7 @@ CONOUT CUCUONAR CWLEY CZE +Caitlyn Caprese Codelab Codey @@ -48,6 +52,7 @@ Dataform DeepEval Depatmint Disturbia +Doaa Doogler Dreesen Durafast @@ -58,6 +63,7 @@ EIP EMNLP ENU ESG +Eliud Embs Envane Esin @@ -69,6 +75,7 @@ FLX FMWK FPDF FTPS +Federer Finvest Firestore Fishburne @@ -81,6 +88,7 @@ GFile GKE GObject GWLP +Gabeira Gameplay Gandalf Gatace @@ -98,7 +106,9 @@ HREDRAW HSA HVDC Hamamoto +Hamblin Hamers +Harvick Hbf Hickson Hida @@ -107,10 +117,12 @@ Hisaki Hmmm Hogwarts Hubmann +Hyperdisk ICICI ICML INFOPLIST IVF +Isner Jang Jedi Joji @@ -119,13 +131,17 @@ KPIs Kaelen Kaggle Kamradt +Katelyn Kaufmanns Kawagoe Keanu Keown Ketamine Khanh +Kipchoge Knopf +Kohli +Kraizt Kubeflow Kudrow LCEL @@ -141,6 +157,7 @@ Lego Llion Logrus Lottry +MLB MSCHF MSGSEND MTL @@ -150,9 +167,12 @@ Malware Mamah Mandiri Masaru +Meawad Memegen Meteo Mewgler +Milito +Mirai Mitani Mosher Mvar @@ -161,15 +181,19 @@ NCCREATE NDEBUG NGRAM NGRAMS +NICOLAS NMT NOMINMAX NOZORDER NVIDIA +Nagasu Niitsuma Nintendo Noogler OOTB Oberst +Ohashi +Ohtani Olgivanna Ollama Omnibox @@ -182,6 +206,8 @@ Parmar Persero Phaidon Pharma +Pistorius +Priyanka QPM Qwiklabs RAGAS @@ -190,13 +216,18 @@ ROOTSPAN RRF RTN RYDE +Raducanu Reranking Resona Rizzoli +Robben Robeco +Ronaldo +Rousey SDKROOT SEK SEO +SIMONE SKUs SNB SPII @@ -213,11 +244,13 @@ Selam Sestero Shazeer Shenzhou +Shohei Simpsons Siri Skaffold Sketchfab Smartbuy +Statcast Storrer Strappy Surampudi @@ -243,6 +276,7 @@ USERDATA Unimicron Upserting Urs +Usain Uszkoreit VAPO VFT @@ -251,8 +285,10 @@ VOS VREDRAW Vandamm Vaswani +Vergadia Ves Vijay +Virat Viru VirusTotal WDIR @@ -304,6 +340,7 @@ barmode barpolar baxis bbc +bff bigframes bigquery bitcoin @@ -313,6 +350,7 @@ bqml carbonara caudatus caxis +cce cctv cfbundle chatbots @@ -342,6 +380,7 @@ dataframe datname dbadmin dbln +ddbb ddl deepeval dente @@ -376,6 +415,7 @@ 
eur evals faiss fastapi +fda fect fewshot ffi @@ -397,6 +437,7 @@ fts fulltext funtion gapic +gauff gboolean gbq gchar @@ -453,6 +494,7 @@ iphoneos ipykernel ipynb isa +itable itables iterrows ivf @@ -473,6 +515,7 @@ lakecolor landcolor langchain langgraph +lebron lenzing levelname lexer @@ -486,6 +529,7 @@ logprobs lparam lxml lycra +mahut makeover mapbox maxcold @@ -500,6 +544,7 @@ morty moviepy mpn mrr +mwouts nbconvert nbfmt nbformat @@ -513,6 +558,7 @@ nmade nmilitary notetaker nrows +nunique oai objc ollama @@ -670,6 +716,7 @@ xxxxxxxx yaxes yaxis ylabel +youtube ytd yticks zaxis diff --git a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb index 500437a464..cb553a2d01 100644 --- a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb +++ b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb @@ -106,15 +106,7 @@ "metadata": { "id": "tFy3H3aPgx12" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], + "outputs": [], "source": [ "%pip install --upgrade --user --quiet google-cloud-aiplatform itables" ] @@ -138,18 +130,7 @@ "metadata": { "id": "XRvKdaPDTznN" }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'status': 'ok', 'restart': True}" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import IPython\n", "\n", @@ -436,7 +417,7 @@ "source": [ "Next, we'll show how to extract structured outputs using [controlled generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output), in this case from a video that covers multiple topics.\n", "\n", - "We’re going to see how Gemini Pro’s industry-leading 2 million token context window can help analyze [the full opening keynote](https://www.youtube.com/watch?v=V6DJYGn2SFk) from our Next conference back in 
April - all 1 hour and 41 minutes of it!" + "We're going to see how Gemini Pro's industry-leading 2 million token context window can help analyze [the full opening keynote](https://www.youtube.com/watch?v=V6DJYGn2SFk) from our Next conference back in April - all 1 hour and 41 minutes of it!" ] }, { @@ -746,7 +727,7 @@ "### Google \"Year in Search\" videos\n", "Now, consider expanding the problem to a more common enterprise use case: extracting information from _multiple_ YouTube videos at once.\n", "\n", - "This time, we’ll use [Google’s “Year in Search” videos](https://about.google/intl/ALL_us/stories/year-in-search/), which summarize the questions, people, and moments that captured the world’s attention in each year. As of fall 2024, there are 14 of these videos, each 2-4 minutes in length, from [2010](https://www.youtube.com/watch?v=F0QXB5pw2qE) through [2023](https://www.youtube.com/watch?v=3KtWfp0UopM).\n", + "This time, we'll use [Google's \"Year in Search\" videos](https://about.google/intl/ALL_us/stories/year-in-search/), which summarize the questions, people, and moments that captured the world's attention in each year. As of fall 2024, there are 14 of these videos, each 2-4 minutes in length, from [2010](https://www.youtube.com/watch?v=F0QXB5pw2qE) through [2023](https://www.youtube.com/watch?v=3KtWfp0UopM).\n", "\n", "We start by reading in a CSV file that has links to all the videos." ] @@ -900,7 +881,7 @@ "source": [ "### Set up for analyzing multiple video files\n", "\n", - "Let’s say we are a sports agency who wants to see which athletes or teams appear most often in these videos as a measure of cultural relevance. 
Instead of watching and manually counting, we can use Gemini’s multimodal capabilities and world knowledge to extract each appearance of an athlete or team into a structured output that we can use for further analysis.\n", + "Let's say we are a sports agency who wants to see which athletes or teams appear most often in these videos as a measure of cultural relevance. Instead of watching and manually counting, we can use Gemini's multimodal capabilities and world knowledge to extract each appearance of an athlete or team into a structured output that we can use for further analysis.\n", "\n", "The system instructions, prompt, and response schema that will apply to all 14 videos are each created in the cell below." ] @@ -1045,7 +1026,7 @@ "source": [ "### Extract and analyze video results across years\n", "\n", - "Once we have the results from Gemini, we can process them and get table of every athlete or team apperance across all 14 \"Year in Search\" videos." + "Once we have the results from Gemini, we can process them and get table of every athlete or team appearance across all 14 \"Year in Search\" videos." ] }, { From 7f42739ae93a688c26739edbcd3d09c6d8798bbf Mon Sep 17 00:00:00 2001 From: noabenefraim Date: Thu, 3 Oct 2024 16:06:01 -0700 Subject: [PATCH 41/76] feat: LlamaIndex Workflows (#1198) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [ ] You are listed as the author in your notebook or README file. - [ ] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [ ] Make your Pull Request title in the specification. 
- [ ] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --------- Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> --- .github/actions/spelling/allow.txt | 4 + .github/actions/spelling/excludes.txt | 1 + .../orchestration/llamaindex_workflows.ipynb | 1096 +++++++++++++++++ rag-grounding/README.md | 1 + 4 files changed, 1102 insertions(+) create mode 100644 gemini/orchestration/llamaindex_workflows.ipynb diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 23cd4e5291..dff17f8f67 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -610,6 +610,7 @@ pypdf pyplot pysftp pyvis +qna qubit qubits ragas @@ -621,7 +622,10 @@ rarians ratelimit redef repreve +reranked reranker +reranking +reranks ribeye ringspun rpet diff --git a/.github/actions/spelling/excludes.txt b/.github/actions/spelling/excludes.txt index 74ed9f32e1..d482619f72 100644 --- a/.github/actions/spelling/excludes.txt +++ b/.github/actions/spelling/excludes.txt @@ -103,6 +103,7 @@ ^\Qvision/getting-started/image_generation.ipynb\E$ ^\Qvision/getting-started/visual_captioning.ipynb\E$ ^\Qvision/use-cases/creating_high_quality_visual_assets_with_gemini_and_imagen.ipynb\E$ +^\Qgemini/orchestration/llamaindex_workflows.ipynb\E$ ignore$ ^\Qowlbot.py\E$ ^\Qsearch/bulk-question-answering/bulk_question_answering_output.tsv\E$ diff --git a/gemini/orchestration/llamaindex_workflows.ipynb b/gemini/orchestration/llamaindex_workflows.ipynb new file mode 100644 index 0000000000..eff81896d5 --- /dev/null +++ b/gemini/orchestration/llamaindex_workflows.ipynb @@ -0,0 +1,1096 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "09472b4cfe54" + }, + "source": [ + "# LlamaIndex RAG Workflows using Gemini and Firestore\n", + "\n", + "
\n", - " \n", + " \n", " \"Google
Open in Colab\n", "
\n", "
\n", - " \n", + " \n", " \"Google
Open in Colab Enterprise\n", "
\n", "
\n", - " \n", + " \n", " \"Vertex
Open in Workbench\n", "
\n", "
\n", - " \n", + " \n", " \"GitHub
View on GitHub\n", "
\n", "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "18b1887a153f" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Noa Ben-Efraim](https://github.com/noabenefraim) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4dddad16697f" + }, + "source": [ + "## Overview\n", + "LlamaIndex workflows are a powerful way to orchestrate complex LLM (large language model) applications. They provide an event-driven framework for building AI systems that go beyond simple question-answering.   \n", + "\n", + "Think of a workflow as a series of steps, where each step performs a specific action. These actions can be anything from querying an LLM, to retrieving data from a vector database, to interacting with external APIs. The workflow manages the flow of data between these steps, making it easy to build sophisticated AI applications.   \n", + "\n", + "Here's a breakdown of the key concepts:\n", + "\n", + "+ Events: These trigger actions within the workflow. For example, a user's query can be an initial event that kicks off the workflow.   \n", + "+ Steps: These are individual functions decorated with @step that process events and potentially emit new events. Steps are the building blocks of your workflow.   \n", + "+ Event-driven: This means that the workflow reacts to events as they happen, making it flexible and dynamic." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5bdceacc7b39" + }, + "source": [ + "This notebook perform a complex Retrieval Augmented Generation (RAG) workflow using Gemini models and Firestore databases. 
There are two branches for this workflow:\n", + "\n", + "_Branch 1_\n", + "+ Start Event triggered by providing a data directory to the workflow\n", + "+ Ingest data using the LlamaIndex `SimpleDirectoryReader`\n", + "+ Load data in the Firestore Database\n", + "\n", + "_Branch 2_\n", + "+ Start Event triggered by providing a query to the workflow\n", + "+ The QueryMultiStep Event that breaks down a complex query into sequential sub-questions using Gemini. Then proceeds to answer the sub-questions.\n", + "+ The sub-questions results are passed to the RerankEvent where given the initial user query, Gemini reranks the returned answers to the sub-questions.\n", + "+ The reranked chunks are passed to the CreateCitationEvents where citations are added to the sub-questions used to generate the answer.\n", + "+ An answer is synthesized for the original query and returned to the user.\n", + "\n", + "References:\n", + "+ https://docs.llamaindex.ai/en/stable/examples/workflow/rag/\n", + "+ https://docs.llamaindex.ai/en/stable/examples/workflow/multi_step_query_engine/\n", + "+ https://docs.llamaindex.ai/en/stable/examples/workflow/citation_query_engine/\n", + "\n", + "\n", + "![RAGWorkflow](https://storage.googleapis.com/github-repo/generative-ai/gemini/orchestration/llamaindex_workflows/RAGWorkflow.png)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ef012cf7cb67" + }, + "source": [ + "## Get started\n", + "\n", + "### Install required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "323a27d12c02" + }, + "outputs": [], + "source": [ + "%pip install llama-index==\"0.11.8\" \\\n", + " llama-index-embeddings-vertex==\"0.2.0\" \\\n", + " llama-index-utils-workflow==\"0.2.1\" \\\n", + " llama-index-llms-vertex==\"0.3.4\" \\\n", + " llama-index-storage-docstore-firestore==\"0.2.0\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f42d12d15616" + }, + "source": [ + "### Restart runtime\n", + 
"\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f3d98395d9a4" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e114f5653870" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "911453311a5d" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e8d7771a5818" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "This notebook requires the following resources:\n", + "+ Initialized Google Cloud project\n", + "+ Vertex AI API enabled\n", + "+ Existing VPC/Subnet\n", + "+ Existing Firestore database\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "To get started using Firestore Database, refer to the following [documentation](https://cloud.google.com/firestore/docs/manage-databases).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment).\n" + ] + }, + { + "cell_type": 
"code", + "execution_count": 1, + "metadata": { + "id": "e04ae6146ccd" + }, + "outputs": [], + "source": [ + "# Use the environment variable if the user doesn't provide Project ID.\n", + "import os\n", + "\n", + "import vertexai\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", + "FIRESTORE_DATABASE_ID = \"[your-firestore-database-id]\"\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d68eca7a8d4f" + }, + "source": [ + "## Workflow" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "93704f34a080" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "9b7824a768bc" + }, + "outputs": [], + "source": [ + "from typing import Any, cast\n", + "\n", + "from IPython.display import Markdown, display\n", + "from llama_index.core import (\n", + " Settings,\n", + " SimpleDirectoryReader,\n", + " StorageContext,\n", + " VectorStoreIndex,\n", + ")\n", + "from llama_index.core.indices.query.query_transform.base import (\n", + " StepDecomposeQueryTransform,\n", + ")\n", + "from llama_index.core.llms import LLM\n", + "from llama_index.core.node_parser import SentenceSplitter\n", + "from llama_index.core.postprocessor.llm_rerank import LLMRerank\n", + "from llama_index.core.prompts import PromptTemplate\n", + "from llama_index.core.response_synthesizers import (\n", + " ResponseMode,\n", + " get_response_synthesizer,\n", + ")\n", + "from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle, TextNode\n", + "from llama_index.core.workflow import (\n", + " Context,\n", + " Event,\n", + " StartEvent,\n", + " StopEvent,\n", + " Workflow,\n", + " step,\n", + 
")\n", + "from llama_index.embeddings.vertex import VertexTextEmbedding\n", + "from llama_index.llms.vertex import Vertex\n", + "from llama_index.storage.docstore.firestore import FirestoreDocumentStore\n", + "from llama_index.utils.workflow import draw_all_possible_flows\n", + "from vertexai.generative_models import HarmBlockThreshold, HarmCategory, SafetySetting" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0e101376082c" + }, + "source": [ + "### Get data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f15a704ea11b" + }, + "outputs": [], + "source": [ + "!mkdir -p './data'\n", + "!wget 'https://www.gutenberg.org/cache/epub/64317/pg64317.txt' -O 'data/gatsby.txt'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "927a7ffd9ad8" + }, + "source": [ + "### Set credentials" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "7d11aac2947b" + }, + "outputs": [], + "source": [ + "import google.auth\n", + "import google.auth.transport.requests\n", + "\n", + "# credentials will now have an api token\n", + "credentials = google.auth.default(quota_project_id=PROJECT_ID)[0]\n", + "request = google.auth.transport.requests.Request()\n", + "credentials.refresh(request)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "af476af08250" + }, + "source": [ + "## Workflow" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "456efd13df2a" + }, + "source": [ + "### Set up the LLM" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "695792a24ba9" + }, + "outputs": [], + "source": [ + "safety_config = [\n", + " SafetySetting(\n", + " category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,\n", + " threshold=HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", + " ),\n", + " SafetySetting(\n", + " category=HarmCategory.HARM_CATEGORY_HARASSMENT,\n", + " threshold=HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", + " ),\n", + " SafetySetting(\n", 
+ " category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,\n", + " threshold=HarmBlockThreshold.BLOCK_ONLY_HIGH,\n", + " ),\n", + "]\n", + "embedding_model = VertexTextEmbedding(\n", + " model_name=\"text-embedding-004\", credentials=credentials\n", + ")\n", + "llm = Vertex(\n", + " model=\"gemini-pro\",\n", + " temperature=0.2,\n", + " max_tokens=3000,\n", + " safety_settings=safety_config,\n", + " credentials=credentials,\n", + ")\n", + "\n", + "Settings.embed_model = embedding_model\n", + "Settings.llm = llm" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1b99e7e4aa0d" + }, + "source": [ + "### Define Event classes\n", + "\n", + "Here we will create custom events that can be emitted by steps and trigger other steps. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "31173e6befe2" + }, + "outputs": [], + "source": [ + "class RetrieverEvent(Event):\n", + " \"\"\"Result of running retrieval\"\"\"\n", + "\n", + " nodes: list[NodeWithScore]\n", + "\n", + "\n", + "class RerankEvent(Event):\n", + " \"\"\"Result of running reranking on retrieved nodes\"\"\"\n", + "\n", + " nodes: list[NodeWithScore]\n", + " source_nodes: list[NodeWithScore]\n", + " final_response_metadata: dict[str, Any]\n", + "\n", + "\n", + "class FirestoreIndexData(Event):\n", + " \"\"\"Result of indexing documents in Firestore\"\"\"\n", + "\n", + " status: str\n", + "\n", + "\n", + "class QueryMultiStepEvent(Event):\n", + " \"\"\"\n", + " Event containing results of a multi-step query process.\n", + "\n", + " Attributes:\n", + " nodes (List[NodeWithScore]): List of nodes with their associated scores.\n", + " source_nodes (List[NodeWithScore]): List of source nodes with their scores.\n", + " final_response_metadata (Dict[str, Any]): Metadata associated with the final response.\n", + " \"\"\"\n", + "\n", + " nodes: list[NodeWithScore]\n", + " source_nodes: list[NodeWithScore]\n", + " final_response_metadata: dict[str, Any]\n", + "\n", + "\n", + 
"class CreateCitationsEvent(Event):\n", + " \"\"\"Add citations to the nodes.\"\"\"\n", + "\n", + " nodes: list[NodeWithScore]\n", + " source_nodes: list[NodeWithScore]\n", + " final_response_metadata: dict[str, Any]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "805eae826756" + }, + "source": [ + "### Update Prompt Templates\n", + "\n", + "Defining custom prompts used for the citation portion of the workflow." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "b2ec8b2859f2" + }, + "outputs": [], + "source": [ + "CITATION_QA_TEMPLATE = PromptTemplate(\n", + " \"Your task is to answer the question based on the information given in the sources listed below.\"\n", + " \"Use only the provided sources to answer.\"\n", + " \"Cite the source number(s) for any information you use in your answer (e.g., [1]).\"\n", + " \"Always include at least one source citation in your answer.\"\n", + " \"Only cite a source if you directly use information from it.\"\n", + " \"If the sources don't contain the information needed to answer the question, state that.\"\n", + " \"For example:\"\n", + " \"Source 1: Apples are red, green, or yellow.\"\n", + " \"Source 2: Bananas are yellow when ripe.\"\n", + " \"Source 3: Strawberries are red when ripe.\"\n", + " \"Query: Which fruits are red when ripe?\"\n", + " \"Answer: Apples [1] and strawberries [3] can be red when ripe.\"\n", + " \"------\"\n", + " \"Below are several numbered sources of information:\"\n", + " \"------\"\n", + " \"{context_str}\"\n", + " \"------\"\n", + " \"Query: {query_str}\"\n", + " \"Answer: \"\n", + ")\n", + "\n", + "CITATION_REFINE_TEMPLATE = PromptTemplate(\n", + " \"You have an initial answer to a query.\"\n", + " \"Your job is to improve this answer using the information provided in the numbered sources below. 
Here's how:\"\n", + " \" - Read the existing answer and the sources carefully.\"\n", + " \" - Identify any information in the sources that can improve the answer by adding details, making it more accurate, or providing better support.\"\n", + " \" - If the sources provide new information, incorporate it into the answer.\"\n", + " \" - If the sources contradict the existing answer, correct the answer.\"\n", + " \" - If the sources aren't helpful, keep the original answer.\"\n", + " \"Cite the source number(s) for any information you use in your answer (e.g., [1]).\"\n", + " \"We have provided an existing answer: {existing_answer}\"\n", + " \"Below are several numbered sources of information. \"\n", + " \"Use them to refine the existing answer. \"\n", + " \"If the provided sources are not helpful, you will repeat the existing answer.\"\n", + " \"------\"\n", + " \"{context_msg}\"\n", + " \"------\"\n", + " \"Query: {query_str}\"\n", + " \"Answer: \"\n", + ")\n", + "\n", + "DEFAULT_CITATION_CHUNK_SIZE = 512\n", + "DEFAULT_CITATION_CHUNK_OVERLAP = 20" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "09009f1cd892" + }, + "source": [ + "### Workflow Class\n", + "\n", + "The RAGWorkflow() class contains all the steps of the workflow. We define the steps by decorating the method with @step.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "486b4ba78947" + }, + "outputs": [], + "source": [ + "class RAGWorkflow(Workflow):\n", + " @step\n", + " async def ingest_data(\n", + " self, ctx: Context, ev: StartEvent\n", + " ) -> FirestoreIndexData | None:\n", + " \"\"\"Entry point to ingest a document, triggered by a StartEvent with 'dirname'.\"\"\"\n", + " dirname = ev.get(\"dirname\")\n", + " if not dirname:\n", + " return None\n", + "\n", + " documents = SimpleDirectoryReader(dirname).load_data()\n", + " await ctx.set(\"documents\", documents)\n", + " return FirestoreIndexData(\n", + " status=\"First step complete. 
Data loaded into Documents.\"\n", + " )\n", + "\n", + " @step\n", + " async def load_database(self, ctx: Context, ev: FirestoreIndexData) -> StopEvent:\n", + " print(ev.status)\n", + "\n", + " # create (or load) docstore and add nodes\n", + " docstore = FirestoreDocumentStore.from_database(\n", + " project=PROJECT_ID,\n", + " database=FIRESTORE_DATABASE_ID,\n", + " )\n", + "\n", + " docstore.add_documents(await ctx.get(\"documents\"))\n", + "\n", + " # create storage context\n", + " storage_context = StorageContext.from_defaults(docstore=docstore)\n", + "\n", + " # setup index\n", + " index = VectorStoreIndex.from_documents(\n", + " documents=await ctx.get(\"documents\"), storage_context=storage_context\n", + " )\n", + "\n", + " print(\"Index created\")\n", + " return StopEvent(index)\n", + "\n", + " def combine_queries(\n", + " self,\n", + " query_bundle: QueryBundle,\n", + " prev_reasoning: str,\n", + " llm: LLM,\n", + " ) -> QueryBundle:\n", + " \"\"\"Combine queries using StepDecomposeQueryTransform.\"\"\"\n", + " transform_metadata = {\"prev_reasoning\": prev_reasoning}\n", + " return StepDecomposeQueryTransform(llm=llm)(\n", + " query_bundle, metadata=transform_metadata\n", + " )\n", + "\n", + " def default_stop_fn(self, stop_dict: dict) -> bool:\n", + " \"\"\"Stop function for multi-step query combiner.\"\"\"\n", + " query_bundle = cast(QueryBundle, stop_dict.get(\"query_bundle\"))\n", + " if query_bundle is None:\n", + " raise ValueError(\"Response must be provided to stop function.\")\n", + "\n", + " return \"none\" in query_bundle.query_str.lower()\n", + "\n", + " @step(pass_context=True)\n", + " async def query_multistep(\n", + " self, ctx: Context, ev: StartEvent\n", + " ) -> QueryMultiStepEvent | None:\n", + " \"\"\"Entry point for RAG, triggered by a StartEvent with `query`. 
Execute multi-step query process.\"\"\"\n", + "\n", + " query = ev.get(\"query\")\n", + " index = ev.get(\"index\")\n", + "\n", + " prev_reasoning = \"\"\n", + " cur_response = None\n", + " should_stop = False\n", + " cur_steps = 0\n", + "\n", + " # use response\n", + " final_response_metadata: dict[str, Any] = {\"sub_qa\": []}\n", + "\n", + " text_chunks = []\n", + " source_nodes = []\n", + "\n", + " stop_fn = self.default_stop_fn\n", + "\n", + " if not query:\n", + " return None\n", + "\n", + " print(f\"Query the database with: {query}\")\n", + "\n", + " # store the query in the global context\n", + " await ctx.set(\"query\", query)\n", + "\n", + " # get the index from the global context\n", + " if index is None:\n", + " print(\"Index is empty, load some documents before querying!\")\n", + " return None\n", + "\n", + " num_steps = ev.get(\"num_steps\")\n", + " query_engine = index.as_query_engine()\n", + "\n", + " while not should_stop:\n", + " if num_steps is not None and cur_steps >= num_steps:\n", + " should_stop = True\n", + " break\n", + " elif should_stop:\n", + " break\n", + "\n", + " updated_query_bundle = self.combine_queries(\n", + " QueryBundle(query_str=query),\n", + " prev_reasoning,\n", + " llm=Settings.llm,\n", + " )\n", + "\n", + " print(\n", + " f\"Created query for the step - {cur_steps} is: {updated_query_bundle}\"\n", + " )\n", + "\n", + " stop_dict = {\"query_bundle\": updated_query_bundle}\n", + " if stop_fn(stop_dict):\n", + " should_stop = True\n", + " break\n", + "\n", + " cur_response = query_engine.query(updated_query_bundle)\n", + "\n", + " # append to response builder\n", + " cur_qa_text = (\n", + " f\"\\nQuestion: {updated_query_bundle.query_str}\\n\"\n", + " f\"Answer: {cur_response!s}\"\n", + " )\n", + " text_chunks.append(cur_qa_text)\n", + " print(\"Source nodes used:\\n\")\n", + " for source_node in cur_response.source_nodes:\n", + " print(source_node)\n", + " source_nodes.append(source_node)\n", + "\n", + " # update 
metadata\n", + " final_response_metadata[\"sub_qa\"].append(\n", + " (updated_query_bundle.query_str, cur_response)\n", + " )\n", + "\n", + " prev_reasoning += (\n", + " f\"- {updated_query_bundle.query_str}\\n\" f\"- {cur_response!s}\\n\"\n", + " )\n", + " cur_steps += 1\n", + "\n", + " nodes = [\n", + " NodeWithScore(node=TextNode(text=text_chunk)) for text_chunk in text_chunks\n", + " ]\n", + " return QueryMultiStepEvent(\n", + " nodes=nodes,\n", + " source_nodes=source_nodes,\n", + " final_response_metadata=final_response_metadata,\n", + " )\n", + "\n", + " @step\n", + " async def rerank(self, ctx: Context, ev: QueryMultiStepEvent) -> RerankEvent:\n", + " # Rerank the nodes\n", + " ranker = LLMRerank(choice_batch_size=5, top_n=10, llm=Settings.llm)\n", + " print(\"Entering reranking of nodes:\\n\")\n", + " print(\"Original query: \", await ctx.get(\"query\", default=None), flush=True)\n", + " # print(await ctx.get(\"query\", default=None), flush=True)\n", + " try:\n", + " new_nodes = ranker.postprocess_nodes(\n", + " ev.nodes, query_str=await ctx.get(\"query\", default=None)\n", + " )\n", + " except:\n", + " # re ranker is not guaranteed to create parsable output\n", + " new_nodes = ev.nodes\n", + "\n", + " print(f\"Reranked nodes to {len(new_nodes)}\")\n", + " return RerankEvent(\n", + " nodes=new_nodes,\n", + " source_nodes=ev.source_nodes,\n", + " final_response_metadata=ev.final_response_metadata,\n", + " )\n", + "\n", + " @step\n", + " async def create_citation_nodes(self, ev: RerankEvent) -> CreateCitationsEvent:\n", + " \"\"\"\n", + " Modify retrieved nodes to create granular sources for citations.\n", + "\n", + " Takes a list of NodeWithScore objects and splits their content\n", + " into smaller chunks, creating new NodeWithScore objects for each chunk.\n", + " Each new node is labeled as a numbered source, allowing for more precise\n", + " citation in query results.\n", + "\n", + " Args:\n", + " nodes (List[NodeWithScore]): A list of NodeWithScore 
objects to be processed.\n", + "\n", + " Returns:\n", + " List[NodeWithScore]: A new list of NodeWithScore objects, where each object\n", + " represents a smaller chunk of the original nodes, labeled as a source.\n", + " \"\"\"\n", + " nodes = ev.nodes\n", + "\n", + " new_nodes: list[NodeWithScore] = []\n", + "\n", + " text_splitter = SentenceSplitter(\n", + " chunk_size=DEFAULT_CITATION_CHUNK_SIZE,\n", + " chunk_overlap=DEFAULT_CITATION_CHUNK_OVERLAP,\n", + " )\n", + "\n", + " for node in nodes:\n", + " print(node)\n", + "\n", + " text_chunks = text_splitter.split_text(\n", + " node.node.get_content(metadata_mode=MetadataMode.NONE)\n", + " )\n", + "\n", + " for text_chunk in text_chunks:\n", + " text = f\"Source {len(new_nodes)+1}:\\n{text_chunk}\\n\"\n", + "\n", + " new_node = NodeWithScore(\n", + " node=TextNode.model_validate(node.node), score=node.score\n", + " )\n", + "\n", + " new_node.node.text = text\n", + " new_nodes.append(new_node)\n", + " return CreateCitationsEvent(\n", + " nodes=new_nodes,\n", + " source_nodes=ev.source_nodes,\n", + " final_response_metadata=ev.final_response_metadata,\n", + " )\n", + "\n", + " @step\n", + " async def synthesize(self, ctx: Context, ev: CreateCitationsEvent) -> StopEvent:\n", + " \"\"\"Return a streaming response using reranked nodes.\"\"\"\n", + "\n", + " print(\"Synthesizing final result...\")\n", + "\n", + " response_synthesizer = get_response_synthesizer(\n", + " llm=Vertex(model=\"gemini-1.5-pro\", temperature=0.0, max_tokens=5000),\n", + " text_qa_template=CITATION_QA_TEMPLATE,\n", + " refine_template=CITATION_REFINE_TEMPLATE,\n", + " response_mode=ResponseMode.COMPACT,\n", + " use_async=True,\n", + " )\n", + " query = await ctx.get(\"query\", default=None)\n", + " response = await response_synthesizer.asynthesize(\n", + " query, nodes=ev.nodes, additional_source_nodes=ev.source_nodes\n", + " )\n", + " return StopEvent(result=response)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{ + "id": "506c0759eab2" + }, + "outputs": [], + "source": [ + "# optional - generate DAG for workflow created above\n", + "draw_all_possible_flows(workflow=RAGWorkflow, filename=\"multi_step_workflow.html\") # type: ignore" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4eebc39a2b70" + }, + "source": [ + "### Run the workflow" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "d7a98c8d7876" + }, + "outputs": [], + "source": [ + "w = RAGWorkflow(timeout=200)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "0a14595e4e6a" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "First step complete. Data loaded into Documents.\n", + "Index created\n" + ] + } + ], + "source": [ + "# Ingest the documents\n", + "index = await w.run(dirname=\"./data\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "439d69a3e7f7" + }, + "source": [ + "#### Example 1\n", + "Query: \"What is the significance of the green light?\"" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "id": "19ebb8696f71" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Query the database with: What is the significance of the green light?\n", + "Created query for the step - 0 is: What is the significance of the green light?\n", + "Source nodes used:\n", + "\n", + "Node ID: 0eab96dd-33ef-4d5c-a97e-8ca897af48d6\n", + "Text: Its vanished trees, the trees that had made way for Gatsby’s\n", + "house, had once pandered in whispers to the last and greatest of all\n", + "human dreams; for a transitory enchanted moment man must have held his\n", + "breath in the presence of this continent, compelled into an aesthetic\n", + "contemplation he neither understood nor desired, face to face for the\n", + "l...\n", + "Score: 0.540\n", + "\n", + "Node ID: 4b08ce92-cbf0-4469-88a5-8cb3514da22f\n", + "Text: “I’ve got a man in 
England who buys me clothes. He sends over a\n", + "selection of things at the beginning of each season, spring and fall.”\n", + "He took out a pile of shirts and began throwing them, one by one,\n", + "before us, shirts of sheer linen and thick silk and fine flannel,\n", + "which lost their folds as they fell and covered the table in many-\n", + "coloure...\n", + "Score: 0.525\n", + "\n", + "Created query for the step - 1 is: ## New Question:\n", + "\n", + "**What is the significance of the green light in the context of Gatsby's pursuit of Daisy?** \n", + "\n", + "Source nodes used:\n", + "\n", + "Node ID: f323395e-7546-454a-9f8b-563e73fbb292\n", + "Text: “Old sport, the dance is unimportant.” He wanted nothing less\n", + "of Daisy than that she should go to Tom and say: “I never loved you.”\n", + "After she had obliterated four years with that sentence they could\n", + "decide upon the more practical measures to be taken. One of them was\n", + "that, after she was free, they were to go back to Louisville and be\n", + "marr...\n", + "Score: 0.662\n", + "\n", + "Node ID: a2ec7e02-2983-4da9-b08a-afa1b6cc4216\n", + "Text: “Why didn’t he ask you to arrange a meeting?” “He wants her\n", + "to see his house,” she explained. “And your house is right next\n", + "door.” “Oh!” “I think he half expected her to wander into one of\n", + "his parties, some night,” went on Jordan, “but she never did. Then he\n", + "began asking people casually if they knew her, and I was the first\n", + "one he fo...\n", + "Score: 0.648\n", + "\n", + "Entering reranking of nodes:\n", + "\n", + "Original query: What is the significance of the green light?\n", + "Reranked nodes to 2\n", + "Node ID: c2860521-c9c1-4cab-b7a9-ea1c784506be\n", + "Text: Question: What is the significance of the green light? Answer:\n", + "The green light is a symbol of Gatsby's dream of Daisy. It is the\n", + "light at the end of her dock, which he can see from his house across\n", + "the bay. 
The green light represents Gatsby's hope for a future with\n", + "Daisy, and his belief that he can recapture the past. However, the\n", + "green light is...\n", + "Score: None\n", + "\n", + "Node ID: 7fe78bba-c870-486e-8f29-0168b09a792e\n", + "Text: Question: ## New Question: **What is the significance of the\n", + "green light in the context of Gatsby's pursuit of Daisy?** Answer:\n", + "## The Green Light: A Symbol of Gatsby's Dreams and Desires The green\n", + "light at the end of Daisy's dock plays a pivotal role in symbolizing\n", + "Gatsby's aspirations and the unattainable nature of his dreams. It\n", + "represent...\n", + "Score: None\n", + "\n", + "Synthesizing final result...\n" + ] + }, + { + "data": { + "text/markdown": [ + "## The Significance of the Green Light in The Great Gatsby\n", + "\n", + "The green light at the end of Daisy's dock holds immense symbolic weight in F. Scott Fitzgerald's *The Great Gatsby*. It represents a multitude of Gatsby's aspirations and desires, while simultaneously highlighting the unattainable nature of his dreams.\n", + "\n", + "**Unrequited Love:** The green light's physical proximity to Gatsby, yet separation by the bay, mirrors the emotional distance between him and Daisy. He yearns for her, but she remains out of reach, symbolizing his unrequited love.\n", + "\n", + "**The Past:** The green light evokes memories of Gatsby's past with Daisy, a time when their love seemed possible. He desperately wants to recapture that lost time and recreate their romance, clinging to the hope of a second chance.\n", + "\n", + "**Hope and Illusion:** The green light embodies Gatsby's unwavering hope for a future with Daisy. He believes that if he can achieve enough wealth and success, he can win her back. 
However, this hope is ultimately an illusion, as Daisy has moved on and their circumstances have changed.\n", + "\n", + "**The American Dream:** The green light can be interpreted as a symbol of the American Dream, representing Gatsby's relentless pursuit of wealth and social status. He believes that achieving these goals will bring him happiness and allow him to win Daisy's love. However, the novel ultimately suggests that the American Dream is often unattainable and can lead to disillusionment.\n", + "\n", + "**Additional Points:**\n", + "\n", + "* The green light's color reinforces its symbolic meaning. Green often represents hope, growth, and new beginnings, but in this context, it takes on a more melancholic and unattainable quality.\n", + "* The light's flickering nature reflects the instability of Gatsby's dreams and the uncertainty of his future.\n", + "* Gatsby's constant focus on the green light highlights his single-minded obsession with Daisy and his inability to move on from the past.\n", + "\n", + "**Overall, the green light serves as a powerful symbol that encapsulates Gatsby's longing, his yearning for a lost love, and the ultimately unattainable nature of his dreams.**\n", + "\n", + "**Sources:**\n", + "\n", + "* [1] The Great Gatsby by F. 
Scott Fitzgerald\n", + "* [2] SparkNotes: The Great Gatsby - Symbols, Imagery, Allegory" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Run a query\n", + "NUM_STEPS = 2 # @param {type:\"int\"} represents how many sub-questions generated based on the query\n", + "result = await w.run(\n", + " query=\"What is the significance of the green light?\",\n", + " index=index,\n", + " num_steps=NUM_STEPS,\n", + ")\n", + "\n", + "display(Markdown(f\"{result}\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "da168aa48f73" + }, + "source": [ + "Check the ranked LLM generated sub-question answers used:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "id": "5dd8dab92106" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node ID: c2860521-c9c1-4cab-b7a9-ea1c784506be\n", + "Text: Source 1: Question: What is the significance of the green light?\n", + "Answer: The green light is a symbol of Gatsby's dream of Daisy. It is\n", + "the light at the end of her dock, which he can see from his house\n", + "across the bay. The green light represents Gatsby's hope for a future\n", + "with Daisy, and his belief that he can recapture the past. However,\n", + "the gree...\n", + "Score: None\n", + "\n", + "Node ID: 7fe78bba-c870-486e-8f29-0168b09a792e\n", + "Text: Source 2: Question: ## New Question: **What is the significance\n", + "of the green light in the context of Gatsby's pursuit of Daisy?**\n", + "Answer: ## The Green Light: A Symbol of Gatsby's Dreams and Desires\n", + "The green light at the end of Daisy's dock plays a pivotal role in\n", + "symbolizing Gatsby's aspirations and the unattainable nature of his\n", + "dreams. 
It...\n", + "Score: None\n", + "\n" + ] + } + ], + "source": [ + "for idx in range(0, NUM_STEPS):\n", + " print(result.source_nodes[idx])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "79eb31af5944" + }, + "source": [ + "Check the citations from the original source used:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "6ddf240120c0" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node ID: 0eab96dd-33ef-4d5c-a97e-8ca897af48d6\n", + "Text: Its vanished trees, the trees that had made way for Gatsby’s\n", + "house, had once pandered in whispers to the last and greatest of all\n", + "human dreams; for a transitory enchanted moment man must have held his\n", + "breath in the presence of this continent, compelled into an aesthetic\n", + "contemplation he neither understood nor desired, face to face for the\n", + "l...\n", + "Score: 0.540\n", + "\n", + "Node ID: 4b08ce92-cbf0-4469-88a5-8cb3514da22f\n", + "Text: “I’ve got a man in England who buys me clothes. He sends over a\n", + "selection of things at the beginning of each season, spring and fall.”\n", + "He took out a pile of shirts and began throwing them, one by one,\n", + "before us, shirts of sheer linen and thick silk and fine flannel,\n", + "which lost their folds as they fell and covered the table in many-\n", + "coloure...\n", + "Score: 0.525\n", + "\n", + "Node ID: f323395e-7546-454a-9f8b-563e73fbb292\n", + "Text: “Old sport, the dance is unimportant.” He wanted nothing less\n", + "of Daisy than that she should go to Tom and say: “I never loved you.”\n", + "After she had obliterated four years with that sentence they could\n", + "decide upon the more practical measures to be taken. 
One of them was\n", + "that, after she was free, they were to go back to Louisville and be\n", + "marr...\n", + "Score: 0.662\n", + "\n", + "Node ID: a2ec7e02-2983-4da9-b08a-afa1b6cc4216\n", + "Text: “Why didn’t he ask you to arrange a meeting?” “He wants her\n", + "to see his house,” she explained. “And your house is right next\n", + "door.” “Oh!” “I think he half expected her to wander into one of\n", + "his parties, some night,” went on Jordan, “but she never did. Then he\n", + "began asking people casually if they knew her, and I was the first\n", + "one he fo...\n", + "Score: 0.648\n", + "\n" + ] + } + ], + "source": [ + "for idx in range(NUM_STEPS, len(result.source_nodes)):\n", + " print(result.source_nodes[idx])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2dca7a130be9" + }, + "source": [ + "## Cleaning up\n", + "\n", + "To clean up all Google Cloud resources used in this project, you can delete the Google Cloud project you used for the tutorial.\n", + "\n", + "Otherwise, you can delete the individual resources you created in this tutorial." + ] + } + ], + "metadata": { + "colab": { + "name": "llamaindex_workflows.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/rag-grounding/README.md b/rag-grounding/README.md index 1c128ef498..a9f0f5feec 100644 --- a/rag-grounding/README.md +++ b/rag-grounding/README.md @@ -123,6 +123,7 @@ performance. - **[summarization_large_documents.ipynb](../language/use-cases/document-summarization/summarization_large_documents.ipynb)**, **[summarization_large_documents_langchain.ipynb](../language/use-cases/document-summarization/summarization_large_documents_langchain.ipynb)**: Demonstrate summarizing large documents. +- **[llamaindex_workflows.ipynb](../gemini/orchestration/llamaindex_workflows.ipynb)** Using LlamaIndex Workflows to build an event driven RAG flow. 
### Open Models From 4ec1eb0d36c86d7876c37d35b427d8be0c7e53e6 Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Fri, 4 Oct 2024 02:11:10 +0200 Subject: [PATCH 42/76] refactor: rename speech folder (#1209) --------- Co-authored-by: Owl Bot Co-authored-by: Holt Skinner --- .github/actions/spelling/allow.txt | 2 ++ README.md | 6 +++--- {speech => audio/speech}/README.md | 0 .../speech/getting-started/get_started_with_chirp.ipynb | 8 ++++---- .../speech}/use-cases/storytelling/storytelling.ipynb | 8 ++++---- 5 files changed, 13 insertions(+), 11 deletions(-) rename {speech => audio/speech}/README.md (100%) rename speech/getting-started/speech_recognition.ipynb => audio/speech/getting-started/get_started_with_chirp.ipynb (98%) rename {speech => audio/speech}/use-cases/storytelling/storytelling.ipynb (98%) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index dff17f8f67..4421d752fb 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -344,6 +344,7 @@ bff bigframes bigquery bitcoin +blogs boundings bpa bqml @@ -586,6 +587,7 @@ pietra pii pixmap pkl +playlists plotly plpgsql plt diff --git a/README.md b/README.md index 5b90b283d0..a65cc0ea98 100644 --- a/README.md +++ b/README.md @@ -105,12 +105,12 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor
Speech
- speech/ + audio/
Use this folder if you're interested in building your own solutions from scratch using features from Chirp, a version of Google's Universal Speech Model (USM) on Vertex AI (Vertex AI Chirp API). Sample notebooks, apps, use casesSample notebooks, apps, use cases
@@ -128,7 +128,7 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor RESOURCES.md Learning resources (e.g. blogs, YouTube playlists) about Generative AI on Google CloudResources (e.g. videos, blogposts, learning paths)Resources (e.g. videos, blog posts, learning paths)
diff --git a/speech/README.md b/audio/speech/README.md similarity index 100% rename from speech/README.md rename to audio/speech/README.md diff --git a/speech/getting-started/speech_recognition.ipynb b/audio/speech/getting-started/get_started_with_chirp.ipynb similarity index 98% rename from speech/getting-started/speech_recognition.ipynb rename to audio/speech/getting-started/get_started_with_chirp.ipynb index 9aa95c793e..ce6a4ffa2b 100644 --- a/speech/getting-started/speech_recognition.ipynb +++ b/audio/speech/getting-started/get_started_with_chirp.ipynb @@ -34,18 +34,18 @@ "\n", "\n", " \n", " \n", " - - - - - + - - - + - - + - - + + + + - + - - + - - + -
\n", - " \n", + " \n", " \"Colab Run in Colab\n", " \n", " \n", - " \n", + " \n", " \"GitHub\n", " View on GitHub\n", " \n", " \n", - " \n", + " \n", " \"Vertex\n", " Open in Vertex AI Workbench\n", " \n", @@ -866,7 +866,7 @@ ], "metadata": { "colab": { - "name": "speech_recognition.ipynb", + "name": "get_started_with_chirp.ipynb", "toc_visible": true }, "kernelspec": { diff --git a/speech/use-cases/storytelling/storytelling.ipynb b/audio/speech/use-cases/storytelling/storytelling.ipynb similarity index 98% rename from speech/use-cases/storytelling/storytelling.ipynb rename to audio/speech/use-cases/storytelling/storytelling.ipynb index e27d57e4c3..023764063f 100644 --- a/speech/use-cases/storytelling/storytelling.ipynb +++ b/audio/speech/use-cases/storytelling/storytelling.ipynb @@ -33,22 +33,22 @@ "\n", "\n", " \n", " \n", " \n", " \n", From df1cadf765011d2f3ea2a65d678a9e2ccd3c7907 Mon Sep 17 00:00:00 2001 From: Krishna Vadakattu Date: Fri, 4 Oct 2024 18:37:09 +0100 Subject: [PATCH 43/76] fix: asyncio import, removed model revision pinning and table grouping arg (#1207) Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Co-authored-by: Holt Skinner --- .../youtube_video_analysis.ipynb | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb index cb553a2d01..515fda4102 100644 --- a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb +++ b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb @@ -238,7 +238,6 @@ "outputs": [], "source": [ "import json\n", - "import time\n", "\n", "from IPython.display import HTML, Markdown, display\n", "from itables import show\n", @@ -291,8 +290,8 @@ "outputs": [], "source": [ "# Set Gemini Flash and Pro models to be used in this notebook\n", - "GEMINI_FLASH_MODEL_ID = \"gemini-1.5-flash-002\"\n", - 
"GEMINI_PRO_MODEL_ID = \"gemini-1.5-pro-002\"\n", + "GEMINI_FLASH_MODEL_ID = \"gemini-1.5-flash\"\n", + "GEMINI_PRO_MODEL_ID = \"gemini-1.5-pro\"\n", "\n", "gemini_flash_model = GenerativeModel(GEMINI_FLASH_MODEL_ID)\n", "gemini_pro_model = GenerativeModel(GEMINI_PRO_MODEL_ID)" @@ -987,22 +986,16 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": { "id": "4191dc30d77a" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Elapsed time: 63.80 seconds\n" - ] - } - ], + "outputs": [], "source": [ - "# Perform asynhronous calls across all videos, gather responses\n", - "start_time = time.time()\n", + "# Perform asynchronous calls across all videos, gather responses\n", + "import asyncio\n", + "\n", + "start_time = asyncio.get_event_loop().time()\n", "\n", "get_responses = [\n", " async_generate(multiple_video_extraction_prompt, yt_link)\n", @@ -1011,7 +1004,7 @@ "\n", "multiple_video_extraction_responses = await asyncio.gather(*get_responses)\n", "\n", - "end_time = time.time()\n", + "end_time = asyncio.get_event_loop().time()\n", "\n", "elapsed_time = end_time - start_time\n", "\n", @@ -1358,8 +1351,7 @@ " # Count # of diff years in which each athlete/team appears in video\n", " \"num_years\": x[\"year\"].nunique(),\n", " }\n", - " ),\n", - " include_groups=False,\n", + " )\n", " )\n", " .reset_index()\n", " .\n", From d7e0cc609b6668a5a5fbe1676a3a1070794207a7 Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Fri, 4 Oct 2024 20:37:50 +0200 Subject: [PATCH 44/76] feat: chirp 2 notebook (#1210) # Description This tutorial shows how to use Chirp 2, the latest generation of Google's multilingual ASR-specific models. Thank you for opening a Pull Request! 
Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .github/actions/spelling/allow.txt | 6 + .../get_started_with_chirp_2_sdk.ipynb | 1343 +++++++++++++++++ ...ipynb => get_started_with_chirp_sdk.ipynb} | 8 +- 3 files changed, 1353 insertions(+), 4 deletions(-) create mode 100644 audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb rename audio/speech/getting-started/{get_started_with_chirp.ipynb => get_started_with_chirp_sdk.ipynb} (99%) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 4421d752fb..f4e3efaddf 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -212,6 +212,7 @@ QPM Qwiklabs RAGAS RLHF +RNNs ROOTSPAN RRF RTN @@ -353,6 +354,7 @@ caudatus caxis cce cctv +cer cfbundle chatbots chromadb @@ -491,6 +493,7 @@ imageno imdb imshow iostream +ipd iphoneos ipykernel ipynb @@ -501,6 +504,7 @@ iterrows ivf jegadesh jetbrains +jiwer jsonify jsonlines jupyter @@ -607,6 +611,7 @@ pvc pyautogen pybind pydantic +pydub pymupdf pypdf pyplot @@ -656,6 +661,7 @@ stdcall strdupv streamlit strfreev +stt stuffie subviews subword diff --git a/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb b/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb new file mode 100644 index 0000000000..4976608f8b --- /dev/null +++ 
b/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb @@ -0,0 +1,1343 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Get started with Chirp 2 using Speech-to-Text V2 SDK\n", + "\n", + "
\n", - " \n", + " \n", " \"Google
Run in Colab\n", "
\n", "
\n", - " \n", + " \n", " \"Google
Run in Colab Enterprise\n", "
\n", "
\n", - " \n", + " \n", " \"Vertex
Open in Vertex AI Workbench\n", "
\n", "
\n", - " \n", + " \n", " \"GitHub
View on GitHub\n", "
\n", "
\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Ivan Nardini](https://github.com/inardini) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## Overview\n", + "\n", + "In this tutorial, you learn about how to use Chirp 2, the latest generation of Google's multilingual ASR-specific models.\n", + "\n", + "Chirp 2 improves upon the original Chirp model in accuracy and speed, as well as expanding into key new features like word-level timestamps, model adaptation, and speech translation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Speech-to-Text SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "e73_ZgKWYedz" + }, + "outputs": [], + "source": [ + "! apt update -y -qq\n", + "! apt install ffmpeg -y -qq" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "%pip install --quiet 'google-cloud-speech' 'protobuf<4.21' 'google-auth==2.27.0' 'pydub' 'etils' 'jiwer' 'ffmpeg-python' 'plotly'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Speech-to-Text V2 SDK\n", + "\n", + "To get started using the Speech-to-Text API, you must have an existing Google Cloud project and [enable the Speech-to-Text API](https://console.cloud.google.com/flows/enableapi?apiid=speech.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WIQyBhAn_9tK" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "from google.api_core.client_options import ClientOptions\n", + "from google.cloud.speech_v2 import SpeechClient\n", + "\n", + "API_ENDPOINT = f\"{LOCATION}-speech.googleapis.com\"\n", + "\n", + "client = SpeechClient(\n", + " client_options=ClientOptions(\n", + " api_endpoint=API_ENDPOINT,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zgPO1eR3CYjk" + }, + "source": [ + "### Create a Cloud Storage bucket\n", + "\n", + "Create a storage bucket to store intermediate artifacts such as datasets." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MzGDU7TWdts_" + }, + "outputs": [], + "source": [ + "BUCKET_NAME = \"your-bucket-name-unique\" # @param {type:\"string\", isTemplate: true}\n", + "\n", + "BUCKET_URI = f\"gs://{BUCKET_NAME}\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-EcIXiGsCePi" + }, + "source": [ + "**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NIq7R4HZCfIc" + }, + "outputs": [], + "source": [ + "! 
gsutil mb -l $LOCATION -p $PROJECT_ID $BUCKET_URI" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "from google.cloud.speech_v2.types import cloud_speech" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qqm0OQpAYCph" + }, + "outputs": [], + "source": [ + "import io\n", + "import os\n", + "import subprocess\n", + "import time\n", + "\n", + "import IPython.display as ipd\n", + "from etils import epath as ep\n", + "import jiwer\n", + "import pandas as pd\n", + "import plotly.graph_objs as go\n", + "from pydub import AudioSegment" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sP8GBj3tBAC1" + }, + "source": [ + "### Set constants" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rXTVeU1uBBqY" + }, + "outputs": [], + "source": [ + "INPUT_AUDIO_SAMPLE_FILE_URI = (\n", + " \"gs://github-repo/audio_ai/speech_recognition/attention_is_all_you_need_podcast.wav\"\n", + ")\n", + "INPUT_LONG_AUDIO_SAMPLE_FILE_URI = (\n", + " f\"{BUCKET_URI}/speech_recognition/data/long_audio_sample.wav\"\n", + ")\n", + "\n", + "RECOGNIZER = client.recognizer_path(PROJECT_ID, LOCATION, \"_\")\n", + "\n", + "MAX_CHUNK_SIZE = 25600" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "djgFxrGC_Ykd" + }, + "source": [ + "### Helpers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Zih8W_wC_caW" + }, + "outputs": [], + "source": [ + "def read_audio_file(audio_file_path: str) -> bytes:\n", + " \"\"\"\n", + " Read audio file as bytes.\n", + " \"\"\"\n", + " if audio_file_path.startswith(\"gs://\"):\n", + " with ep.Path(audio_file_path).open(\"rb\") as f:\n", + " audio_bytes = f.read()\n", + " else:\n", + " with open(audio_file_path, \"rb\") as 
f:\n", + " audio_bytes = f.read()\n", + " return audio_bytes\n", + "\n", + "\n", + "def save_audio_sample(audio_bytes: bytes, output_file_uri: str) -> None:\n", + " \"\"\"\n", + " Save audio sample as a file in Google Cloud Storage.\n", + " \"\"\"\n", + "\n", + " output_file_path = ep.Path(output_file_uri)\n", + " if not output_file_path.parent.exists():\n", + " output_file_path.parent.mkdir(parents=True, exist_ok=True)\n", + "\n", + " with output_file_path.open(\"wb\") as f:\n", + " f.write(audio_bytes)\n", + "\n", + "\n", + "def extract_audio_sample(audio_bytes: bytes, duration: int) -> bytes:\n", + " \"\"\"\n", + " Extracts a random audio sample of a given duration from an audio file.\n", + " \"\"\"\n", + " audio = AudioSegment.from_file(io.BytesIO(audio_bytes))\n", + " start_time = 0\n", + " audio_sample = audio[start_time : start_time + duration * 1000]\n", + "\n", + " audio_bytes = io.BytesIO()\n", + " audio_sample.export(audio_bytes, format=\"wav\")\n", + " audio_bytes.seek(0)\n", + "\n", + " return audio_bytes.read()\n", + "\n", + "\n", + "def play_audio_sample(audio_bytes: bytes) -> None:\n", + " \"\"\"\n", + " Plays the audio sample in a notebook.\n", + " \"\"\"\n", + " audio_file = io.BytesIO(audio_bytes)\n", + " ipd.display(ipd.Audio(audio_file.read(), rate=44100))\n", + "\n", + "\n", + "def audio_sample_chunk_n(audio_bytes: bytes, num_chunks: int) -> list[bytes]:\n", + " \"\"\"\n", + " Chunks an audio sample into a specified number of chunks and returns a list of bytes for each chunk.\n", + " \"\"\"\n", + " audio = AudioSegment.from_file(io.BytesIO(audio_bytes))\n", + " total_duration = len(audio)\n", + " chunk_duration = total_duration // num_chunks\n", + "\n", + " chunks = []\n", + " start_time = 0\n", + "\n", + " for _ in range(num_chunks):\n", + " end_time = min(start_time + chunk_duration, total_duration)\n", + " chunk = audio[start_time:end_time]\n", + "\n", + " audio_bytes_chunk = io.BytesIO()\n", + " chunk.export(audio_bytes_chunk, 
format=\"wav\")\n", + " audio_bytes_chunk.seek(0)\n", + " chunks.append(audio_bytes_chunk.read())\n", + "\n", + " start_time = end_time\n", + "\n", + " return chunks\n", + "\n", + "\n", + "def audio_sample_merge(audio_chunks: list[bytes]) -> bytes:\n", + " \"\"\"\n", + " Merges a list of audio chunks into a single audio sample.\n", + " \"\"\"\n", + " audio = AudioSegment.empty()\n", + " for chunk in audio_chunks:\n", + " audio += AudioSegment.from_file(io.BytesIO(chunk))\n", + "\n", + " audio_bytes = io.BytesIO()\n", + " audio.export(audio_bytes, format=\"wav\")\n", + " audio_bytes.seek(0)\n", + "\n", + " return audio_bytes.read()\n", + "\n", + "\n", + "def compress_for_streaming(audio_bytes: bytes) -> bytes:\n", + " \"\"\"\n", + " Compresses audio bytes for streaming using ffmpeg, ensuring the output size is under 25600 bytes.\n", + " \"\"\"\n", + "\n", + " # Temporary file to store original audio\n", + " with open(\"temp_original.wav\", \"wb\") as f:\n", + " f.write(audio_bytes)\n", + "\n", + " # Initial compression attempt with moderate bitrate\n", + " bitrate = \"32k\"\n", + " subprocess.run(\n", + " [\n", + " \"ffmpeg\",\n", + " \"-i\",\n", + " \"temp_original.wav\",\n", + " \"-b:a\",\n", + " bitrate,\n", + " \"-y\",\n", + " \"temp_compressed.mp3\",\n", + " ]\n", + " )\n", + "\n", + " # Check if compressed size is within limit\n", + " compressed_size = os.path.getsize(\"temp_compressed.mp3\")\n", + " if compressed_size <= 25600:\n", + " with open(\"temp_compressed.mp3\", \"rb\") as f:\n", + " compressed_audio_bytes = f.read()\n", + " else:\n", + " # If too large, reduce bitrate and retry\n", + " while compressed_size > 25600:\n", + " bitrate = str(int(bitrate[:-1]) - 8) + \"k\" # Reduce bitrate by 8kbps\n", + " subprocess.run(\n", + " [\n", + " \"ffmpeg\",\n", + " \"-i\",\n", + " \"temp_original.wav\",\n", + " \"-b:a\",\n", + " bitrate,\n", + " \"-y\",\n", + " \"temp_compressed.mp3\",\n", + " ]\n", + " )\n", + " compressed_size = 
os.path.getsize(\"temp_compressed.mp3\")\n", + "\n", + " with open(\"temp_compressed.mp3\", \"rb\") as f:\n", + " compressed_audio_bytes = f.read()\n", + "\n", + " # Clean up temporary files\n", + " os.remove(\"temp_original.wav\")\n", + " os.remove(\"temp_compressed.mp3\")\n", + "\n", + " return compressed_audio_bytes\n", + "\n", + "\n", + "def parse_streaming_recognize_response(response) -> list[tuple[str, int]]:\n", + " \"\"\"Parse streaming responses from the Speech-to-Text API\"\"\"\n", + " streaming_recognize_results = []\n", + " for r in response:\n", + " for result in r.results:\n", + " streaming_recognize_results.append(\n", + " (result.alternatives[0].transcript, result.result_end_offset)\n", + " )\n", + " return streaming_recognize_results\n", + "\n", + "\n", + "def parse_real_time_recognize_response(response) -> list[tuple[str, int]]:\n", + " \"\"\"Parse real-time responses from the Speech-to-Text API\"\"\"\n", + " real_time_recognize_results = []\n", + " for result in response.results:\n", + " real_time_recognize_results.append(\n", + " (result.alternatives[0].transcript, result.result_end_offset)\n", + " )\n", + " return real_time_recognize_results\n", + "\n", + "\n", + "def parse_batch_recognize_response(\n", + " response, audio_sample_file_uri: str = INPUT_LONG_AUDIO_SAMPLE_FILE_URI\n", + ") -> list[tuple[str, int]]:\n", + " \"\"\"Parse batch responses from the Speech-to-Text API\"\"\"\n", + " batch_recognize_results = []\n", + " for result in response.results[\n", + " audio_sample_file_uri\n", + " ].inline_result.transcript.results:\n", + " batch_recognize_results.append(\n", + " (result.alternatives[0].transcript, result.result_end_offset)\n", + " )\n", + " return batch_recognize_results\n", + "\n", + "\n", + "def get_recognize_output(\n", + " audio_bytes: bytes, recognize_results: list[tuple[str, int]]\n", + ") -> list[tuple[bytes, str]]:\n", + " \"\"\"\n", + " Get the output of recognize results, handling 0 timedelta and ensuring no overlaps or 
gaps.\n", + " \"\"\"\n", + " audio = AudioSegment.from_file(io.BytesIO(audio_bytes))\n", + " recognize_output = []\n", + " start_time = 0\n", + "\n", + " initial_end_time = recognize_results[0][1].total_seconds() * 1000\n", + "\n", + " # This loop handles the streaming case where result timestamps might be zero.\n", + " if initial_end_time == 0:\n", + " for i, (transcript, timedelta) in enumerate(recognize_results):\n", + " if i < len(recognize_results) - 1:\n", + " # Use the next timedelta if available\n", + " next_end_time = recognize_results[i + 1][1].total_seconds() * 1000\n", + " end_time = next_end_time\n", + " else:\n", + " next_end_time = len(audio)\n", + " end_time = next_end_time\n", + "\n", + " # Ensure no gaps between chunks\n", + " chunk = audio[start_time:end_time]\n", + " chunk_bytes = io.BytesIO()\n", + " chunk.export(chunk_bytes, format=\"wav\")\n", + " chunk_bytes.seek(0)\n", + " recognize_output.append((chunk_bytes.read(), transcript))\n", + "\n", + " # Set start_time for the next iteration\n", + " start_time = end_time\n", + " else:\n", + " for i, (transcript, timedelta) in enumerate(recognize_results):\n", + " # Calculate end_time in milliseconds\n", + " end_time = timedelta.total_seconds() * 1000\n", + "\n", + " # Ensure no gaps between chunks\n", + " chunk = audio[start_time:end_time]\n", + " chunk_bytes = io.BytesIO()\n", + " chunk.export(chunk_bytes, format=\"wav\")\n", + " chunk_bytes.seek(0)\n", + " recognize_output.append((chunk_bytes.read(), transcript))\n", + "\n", + " # Set start_time for the next iteration\n", + " start_time = end_time\n", + "\n", + " return recognize_output\n", + "\n", + "\n", + "def print_transcription(audio_sample_bytes: bytes, transcription: str) -> None:\n", + " \"\"\"Prettify the play of the audio and the associated print of the transcription text in a notebook\"\"\"\n", + "\n", + " # Play the audio sample\n", + " display(ipd.HTML(\"Audio:\"))\n", + " play_audio_sample(audio_sample_bytes)\n", + " 
display(ipd.HTML(\"
\"))\n", + "\n", + " # Display the transcription text\n", + " display(ipd.HTML(\"Transcription:\"))\n", + " formatted_text = f\"
{transcription}
\"\n", + " display(ipd.HTML(formatted_text))\n", + "\n", + "\n", + "def evaluate_stt(\n", + " actual_transcriptions: list[str],\n", + " reference_transcriptions: list[str],\n", + " audio_sample_file_uri: str = INPUT_LONG_AUDIO_SAMPLE_FILE_URI,\n", + ") -> pd.DataFrame:\n", + " \"\"\"\n", + " Evaluate speech-to-text (STT) transcriptions against reference transcriptions.\n", + " \"\"\"\n", + " audio_uris = [audio_sample_file_uri] * len(actual_transcriptions)\n", + " evaluations = []\n", + " for audio_uri, actual_transcription, reference_transcription in zip(\n", + " audio_uris, actual_transcriptions, reference_transcriptions\n", + " ):\n", + " evaluation = {\n", + " \"audio_uri\": audio_uri,\n", + " \"actual_transcription\": actual_transcription,\n", + " \"reference_transcription\": reference_transcription,\n", + " \"wer\": jiwer.wer(reference_transcription, actual_transcription),\n", + " \"cer\": jiwer.cer(reference_transcription, actual_transcription),\n", + " }\n", + " evaluations.append(evaluation)\n", + "\n", + " evaluations_df = pd.DataFrame(evaluations)\n", + " evaluations_df.reset_index(inplace=True, drop=True)\n", + " return evaluations_df\n", + "\n", + "\n", + "def plot_evaluation_results(\n", + " evaluations_df: pd.DataFrame,\n", + ") -> go.Figure:\n", + " \"\"\"\n", + " Plot the mean Word Error Rate (WER) and Character Error Rate (CER) from the evaluation results.\n", + " \"\"\"\n", + " mean_wer = evaluations_df[\"wer\"].mean()\n", + " mean_cer = evaluations_df[\"cer\"].mean()\n", + "\n", + " trace_means = go.Bar(\n", + " x=[\"WER\", \"CER\"], y=[mean_wer, mean_cer], name=\"Mean Error Rate\"\n", + " )\n", + "\n", + " trace_baseline = go.Scatter(\n", + " x=[\"WER\", \"CER\"], y=[0.5, 0.5], mode=\"lines\", name=\"Baseline (0.5)\"\n", + " )\n", + "\n", + " layout = go.Layout(\n", + " title=\"Speech-to-Text Evaluation Results\",\n", + " xaxis=dict(title=\"Metric\"),\n", + " yaxis=dict(title=\"Error Rate\", range=[0, 1]),\n", + " barmode=\"group\",\n", + " 
)\n", + "\n", + " fig = go.Figure(data=[trace_means, trace_baseline], layout=layout)\n", + " return fig" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VPVDNRyVxquo" + }, + "source": [ + "## Transcribe using Chirp 2\n", + "\n", + "You can use Chirp 2 to transcribe audio in Streaming, Online and Batch modes:\n", + "\n", + "* Streaming mode is good for streaming and real-time audio. \n", + "* Online mode is good for short audio < 1 min.\n", + "* Batch mode is good for long audio 1 min to 8 hrs. \n", + "\n", + "In the following sections, you explore how to use the API to transcribe audio in these three different scenarios." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4uTeBXo6dZlS" + }, + "source": [ + "### Read the audio file\n", + "\n", + "Let's start reading the input audio sample you want to transcribe.\n", + "\n", + "In this case, it is a podcast generated with NotebookLM about the \"Attention is all you need\" [paper](https://arxiv.org/abs/1706.03762)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pjzwMWqpdldM" + }, + "outputs": [], + "source": [ + "input_audio_bytes = read_audio_file(INPUT_AUDIO_SAMPLE_FILE_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SyEUpcf12z73" + }, + "source": [ + "### Prepare audio samples\n", + "\n", + "The podcast audio is ~ 8 mins. Depending on the audio length, you can use different transcribe API methods. To learn more, check out the official documentation. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TRlgCdED793U" + }, + "source": [ + "#### Prepare a short audio sample (< 1 min)\n", + "\n", + "Extract a short audio sample from the original one for streaming and real-time audio processing." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "r-SYb9_b87BZ" + }, + "outputs": [], + "source": [ + "short_audio_sample_bytes = extract_audio_sample(input_audio_bytes, 30)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0Hk2OSiSEFrf" + }, + "outputs": [], + "source": [ + "play_audio_sample(short_audio_sample_bytes)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2rPcMe0LvC3q" + }, + "source": [ + "#### Prepare a long audio sample (from 1 min up to 8 hrs)\n", + "\n", + "Extract a longer audio sample from the original one for batch audio processing." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "L44FoygqvHoP" + }, + "outputs": [], + "source": [ + "long_audio_sample_bytes = extract_audio_sample(input_audio_bytes, 120)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ej2j0FBEvK6s" + }, + "outputs": [], + "source": [ + "play_audio_sample(long_audio_sample_bytes)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6tIbVVe76ML8" + }, + "outputs": [], + "source": [ + "save_audio_sample(long_audio_sample_bytes, INPUT_LONG_AUDIO_SAMPLE_FILE_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w5qPg2OfFAG9" + }, + "source": [ + "### Perform streaming speech recognition\n", + "\n", + "Let's start performing streaming speech recognition." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aAlIgQSoeDT5" + }, + "source": [ + "#### Prepare the audio stream\n", + "\n", + "To simulate an audio stream, you can create a generator yielding chunks of audio data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "j5SPyum6FMiC" + }, + "outputs": [], + "source": [ + "stream = [\n", + " compress_for_streaming(audio_chuck)\n", + " for audio_chuck in audio_sample_chunk_n(short_audio_sample_bytes, num_chunks=5)\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7dDap26FiKlL" + }, + "outputs": [], + "source": [ + "for s in stream:\n", + " play_audio_sample(s)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9z1XGzpxeAMP" + }, + "source": [ + "#### Prepare the stream request\n", + "\n", + "Once you have your audio stream, you can use the `StreamingRecognizeRequest`class to convert each stream component into a API message." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "IOZNYPrfeW49" + }, + "outputs": [], + "source": [ + "audio_requests = (cloud_speech.StreamingRecognizeRequest(audio=s) for s in stream)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oPbf5rNFecI_" + }, + "source": [ + "#### Define streaming recognition configuration\n", + "\n", + "Next, you define the streaming recognition configuration which allows you to set the model to use, language code of the audio and more." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "32Wz990perAo" + }, + "outputs": [], + "source": [ + "streaming_config = cloud_speech.StreamingRecognitionConfig(\n", + " config=cloud_speech.RecognitionConfig(\n", + " language_codes=[\"en-US\"],\n", + " model=\"chirp_2\",\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zVRyTqhWe2gf" + }, + "source": [ + "#### Define the streaming request configuration\n", + "\n", + "Then, you use the streaming configuration to define the streaming request. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "t5qiUJ48e9i5" + }, + "outputs": [], + "source": [ + "stream_request_config = cloud_speech.StreamingRecognizeRequest(\n", + " streaming_config=streaming_config, recognizer=RECOGNIZER\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "h1d508ScfD9I" + }, + "source": [ + "#### Run the streaming recognition request\n", + "\n", + "Finally, you are able to run the streaming recognition request." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "QCq-iROpfl9t" + }, + "outputs": [], + "source": [ + "def requests(request_config: cloud_speech.RecognitionConfig, s: list) -> list:\n", + " yield request_config\n", + " yield from s\n", + "\n", + "\n", + "response = client.streaming_recognize(\n", + " requests=requests(stream_request_config, audio_requests)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d__QUGkWCkGh" + }, + "source": [ + "Here you use a helper function to visualize transcriptions and the associated streams." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_qWA8jXYuMH3" + }, + "outputs": [], + "source": [ + "streaming_recognize_results = parse_streaming_recognize_response(response)\n", + "streaming_recognize_output = get_recognize_output(\n", + " short_audio_sample_bytes, streaming_recognize_results\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "agk_M0xRwzv0" + }, + "outputs": [], + "source": [ + "for audio_sample_bytes, transcription in streaming_recognize_output:\n", + " print_transcription(audio_sample_bytes, transcription)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oYCgDay2hAgB" + }, + "source": [ + "### Perform real-time speech recognition" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F83r9aiNhAgD" + }, + "source": [ + "#### Define real-time recognition configuration\n", + "\n", + "As for the streaming transcription, you define the real-time recognition configuration which allows you to set the model to use, language code of the audio and more." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "j0WprZ-phAgD" + }, + "outputs": [], + "source": [ + "real_time_config = cloud_speech.RecognitionConfig(\n", + " language_codes=[\"en-US\"],\n", + " model=\"chirp_2\",\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "r2TqksAqhAgD" + }, + "source": [ + "#### Define the real-time request configuration\n", + "\n", + "Next, you define the real-time request passing the configuration and the audio sample you want to transcribe. Again, you don't need to define a recognizer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nh55mSzXhAgD" + }, + "outputs": [], + "source": [ + "real_time_request = cloud_speech.RecognizeRequest(\n", + " config=real_time_config,\n", + " content=short_audio_sample_bytes,\n", + " recognizer=RECOGNIZER,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "817YXVBli0aY" + }, + "source": [ + "#### Run the real-time recognition request\n", + "\n", + "Finally you submit the real-time recognition request." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rc0cBrVsi7UG" + }, + "outputs": [], + "source": [ + "response = client.recognize(request=real_time_request)\n", + "\n", + "real_time_recognize_results = parse_real_time_recognize_response(response)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J2vpMSv7CZ_2" + }, + "source": [ + "And you use a helper function to visualize transcriptions and the associated streams." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ezH51rLH4CBR" + }, + "outputs": [], + "source": [ + "for transcription, _ in real_time_recognize_results:\n", + " print_transcription(short_audio_sample_bytes, transcription)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5M-lIwRJ43EC" + }, + "source": [ + "### Perform batch speech recognition" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LJxhFSg848MO" + }, + "source": [ + "#### Define batch recognition configuration\n", + "\n", + "You start defining the batch recognition configuration."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0CEQUL5_5BT-" + }, + "outputs": [], + "source": [ + "batch_recognition_config = cloud_speech.RecognitionConfig(\n", + " language_codes=[\"en-US\"],\n", + " model=\"chirp_2\",\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SKf3pMBl5E4f" + }, + "source": [ + "#### Set the audio file you want to transcribe\n", + "\n", + "For the batch transcription, you need the audio be staged in a Cloud Storage bucket. Then you set the associated metadata to pass in the batch recognition request." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "o1VCvEEI5MkG" + }, + "outputs": [], + "source": [ + "audio_metadata = cloud_speech.BatchRecognizeFileMetadata(\n", + " uri=INPUT_LONG_AUDIO_SAMPLE_FILE_URI\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5HOKZLp25yFB" + }, + "source": [ + "#### Define batch recognition request\n", + "\n", + "Next, you define the batch recognition request. Notice how you define a recognition output configuration which allows you to determine how would you retrieve the resulting transcription outcome." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SItkaX7tyZ14" + }, + "outputs": [], + "source": [ + "batch_recognition_request = cloud_speech.BatchRecognizeRequest(\n", + " config=batch_recognition_config,\n", + " files=[audio_metadata],\n", + " recognition_output_config=cloud_speech.RecognitionOutputConfig(\n", + " inline_response_config=cloud_speech.InlineOutputConfig(),\n", + " ),\n", + " recognizer=RECOGNIZER,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YQY1eqaY7H0n" + }, + "source": [ + "#### Run the batch recognition request\n", + "\n", + "Finally you submit the batch recognition request which is a [long-running operation](https://google.aip.dev/151) as you see below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AlZwRlLo6F1p" + }, + "outputs": [], + "source": [ + "operation = client.batch_recognize(request=batch_recognition_request)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DrqsNzVmeWu0" + }, + "outputs": [], + "source": [ + "while True:\n", + " if not operation.done():\n", + " print(\"Waiting for operation to complete...\")\n", + " time.sleep(5)\n", + " else:\n", + " print(\"Operation completed.\")\n", + " break" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "B9MEScw7FYAf" + }, + "source": [ + "After the operation finishes, you can retrieve the result as shown below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pjObiPUweZYA" + }, + "outputs": [], + "source": [ + "response = operation.result()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "31cMuwXZFdgI" + }, + "source": [ + "And visualize transcriptions using a helper function." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d0eMjC3Kmo-5" + }, + "outputs": [], + "source": [ + "batch_recognize_results = parse_batch_recognize_response(\n", + " response, audio_sample_file_uri=INPUT_LONG_AUDIO_SAMPLE_FILE_URI\n", + ")\n", + "batch_recognize_output = get_recognize_output(\n", + " long_audio_sample_bytes, batch_recognize_results\n", + ")\n", + "for audio_sample_bytes, transcription in batch_recognize_output:\n", + " print_transcription(audio_sample_bytes, transcription)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "teU52ISxqUQd" + }, + "source": [ + "### Evaluate transcriptions\n", + "\n", + "Finally, you may want to evaluate Chirp transcriptions. To do so, you can use [JiWER](https://github.com/jitsi/jiwer), a simple and fast Python package which supports several metrics. In this tutorial, you use:\n", + "\n", + "- **WER (Word Error Rate)** which is the most common metric. WER is the number of word edits (insertions, deletions, substitutions) needed to change the recognized text to match the reference text, divided by the total number of words in the reference text.\n", + "- **CER (Character Error Rate)** which is the number of character edits (insertions, deletions, substitutions) needed to change the recognized text to match the reference text, divided by the total number of characters in the reference text." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "q1u3g4LnqX6z" + }, + "outputs": [], + "source": [ + "actual_transcriptions = [t for _, t in batch_recognize_output]\n", + "reference_transcriptions = [\n", + " \"\"\"Okay, so, you know, everyone's been talking about AI lately, right? Writing poems, like nailing those tricky emails, even building websites and all you need is a few, what do they call it again? Prompts? Yeah, it's wild. These AI tools are suddenly everywhere. It's hard to keep up. Seriously. 
But here's the thing, a lot of this AI stuff we're seeing, it all goes back to this one research paper from way back in 2017. Attention is all you need. So, today we're doing a deep dive into the core of it. The engine that's kind of driving\"\"\",\n", + " \"\"\"all this change. The Transformer. It's funny, right? This super technical paper, I mean, it really did change how we think about AI and how it uses language. Totally. It's like it, I don't know, cracked a code or something. So, before we get into the transformer, we need to like paint that before picture. Can you take us back to how AI used to deal with language before this whole transformer thing came along? Okay. So, imagine this. You're trying to understand a story, but you can only read like one word at a time. Ouch. Right. And not only that, but you also\"\"\",\n", + " \"\"\"have to like remember every single word you read before just to understand the word you're on right now. That sounds so frustrating, like trying to get a movie by looking at one pixel at a time. Exactly. And that's basically how old AI models used to work. RNNs, recurrent neural networks, they processed language one word after the other, which, you can imagine, was super slow and not that great at handling how, you know, language actually works. So, like remembering how the start of a sentence connects\"\"\",\n", + " \"\"\"to the end or how something that happens at the beginning of a book affects what happens later on. That was really tough for older AI. Totally. It's like trying to get a joke by only remembering the punch line. You miss all the important stuff, all that context. Okay, yeah. I'm starting to see why this paper was such a big deal. So how did \"Attention Is All You Need\" change everything? What's so special about this Transformer thing? Well, I mean, even the title is a good hint, right? It's all about attention. This paper introduced self-attention. 
Basically, it's how the\"\"\",\n", + "]\n", + "\n", + "evaluation_df = evaluate_stt(actual_transcriptions, reference_transcriptions)\n", + "plot_evaluation_results(evaluation_df)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2a4e033321ad" + }, + "source": [ + "## Cleaning up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5bsE-XtXzmpR" + }, + "outputs": [], + "source": [ + "delete_bucket = False\n", + "\n", + "if delete_bucket:\n", + " ! gsutil rm -r $BUCKET_URI" + ] + } + ], + "metadata": { + "colab": { + "name": "get_started_with_chirp_2_sdk.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/audio/speech/getting-started/get_started_with_chirp.ipynb b/audio/speech/getting-started/get_started_with_chirp_sdk.ipynb similarity index 99% rename from audio/speech/getting-started/get_started_with_chirp.ipynb rename to audio/speech/getting-started/get_started_with_chirp_sdk.ipynb index ce6a4ffa2b..7a7c8fe9df 100644 --- a/audio/speech/getting-started/get_started_with_chirp.ipynb +++ b/audio/speech/getting-started/get_started_with_chirp_sdk.ipynb @@ -34,18 +34,18 @@ "\n", "\n", " \n", " \n", " \n", "
\n", - " \n", + " \n", " \"Colab Run in Colab\n", " \n", " \n", - " \n", + " \n", " \"GitHub\n", " View on GitHub\n", " \n", " \n", - " \n", + " \n", " \"Vertex\n", " Open in Vertex AI Workbench\n", " \n", @@ -866,7 +866,7 @@ ], "metadata": { "colab": { - "name": "get_started_with_chirp.ipynb", + "name": "get_started_with_chirp_sdk.ipynb", "toc_visible": true }, "kernelspec": { From 8fd9d89d0581a44e9467cbd5d83127fd6c82b54d Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:20:25 -0500 Subject: [PATCH 45/76] fix: Broken Links (#1216) Fixes #1212 Fixes #1214 Fixes #1215 --- audio/speech/README.md | 3 ++- gemini/sample-apps/photo-discovery/README.md | 9 ++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/audio/speech/README.md b/audio/speech/README.md index 995f6c3949..ddd921ba8e 100644 --- a/audio/speech/README.md +++ b/audio/speech/README.md @@ -7,7 +7,8 @@ This repository explores various use-cases and implementations of speech recogni Here's a breakdown of the content available: - **Getting Started:** - - [speech_recognition.ipynb](getting-started/speech_recognition.ipynb): This Jupyter Notebook provides a basic introduction to performing speech recognition using Google Cloud's Speech-to-Text API. + - [get_started_with_chirp_sdk.ipynb](getting-started/get_started_with_chirp_sdk.ipynb): This Jupyter Notebook provides a basic introduction to performing speech recognition using the Chirp model with Google Cloud's Speech-to-Text API. + - [get_started_with_chirp_2_sdk.ipynb](getting-started/get_started_with_chirp_2_sdk.ipynb): This Jupyter Notebook provides a basic introduction to performing speech recognition using the Chirp 2 model with Google Cloud's Speech-to-Text API. - **Use Cases:** - **Storytelling:** - [storytelling.ipynb](use-cases/storytelling/storytelling.ipynb): This notebook delves into a specific application of speech technology - crafting engaging stories. 
It likely leverages both speech recognition and generation to create interactive or automated storytelling experiences. diff --git a/gemini/sample-apps/photo-discovery/README.md b/gemini/sample-apps/photo-discovery/README.md index ce4bf68c6c..347b88cdc8 100644 --- a/gemini/sample-apps/photo-discovery/README.md +++ b/gemini/sample-apps/photo-discovery/README.md @@ -2,20 +2,19 @@ ![Vertex AI Agent Builder & Flutter Multi-platform Demo – Fallingwater](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/photo-discovery/showcase.png) -This project is a demo that integrates a Vertex AI Agent with a multiplatform Flutter app. Flutter is used as the client app framework, Vertex AI Search is used as a vector DB, and Reasoning Engine helps us build and deploy an agent with LangChain on Vertex AI. +This project is a demo that integrates a Vertex AI Agent with a multi-platform Flutter app. Flutter is used as the client app framework, Vertex AI Search is used as a vector DB, and Reasoning Engine helps us build and deploy an agent with LangChain on Vertex AI. - - -Users can run the Flutter app and take or upload a photo of a landmark. The app identifies the subject name and provides a brief description. +Users can run the Flutter app and take or upload a photo of a landmark. The app identifies the subject name and provides a brief description. To learn more about the subject of the photo, tap "tell me more" to chat with Khanh, an AI agent build built with Vertex AI Agent Builder, and ask follow-up questions about the history of subject in the photo based on information Wikipedia. -The agent can also identify merchandise from the [Google Merchandise Store](https://your.googlemerchandisestore.com) and provide product name, description, pricing, and purchase link by referencing a Google Merchandise store dataset. 
+The agent can also identify merchandise from the [Google Merchandise Store](https://your.merch.google/) and provide product name, description, pricing, and purchase link by referencing a Google Merchandise store dataset. > [!NOTE] Check out the Google I/O 2024 talk for a full walkthrough: [Build generative AI agents with Vertex AI Agent Builder and Flutter](https://youtu.be/V8P_S9OLI_I?si=N2QMBs7HNZL6mKU0). ## Demo + [Try the live demo app](https://photo-discovery-demo.web.app/) ![Chat UI - Lake Wakatipu](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/photo-discovery/demo.gif) From 05b1113966676fcd8c9621c2f9e7fc840de81bc0 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:39:39 -0500 Subject: [PATCH 46/76] ci: Remove Javascript Standard from Superlinter (#1217) --- .github/actions/spelling/excludes.txt | 1 + .github/workflows/linter.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/actions/spelling/excludes.txt b/.github/actions/spelling/excludes.txt index d482619f72..8ca285fc1b 100644 --- a/.github/actions/spelling/excludes.txt +++ b/.github/actions/spelling/excludes.txt @@ -82,6 +82,7 @@ ^\.github/actions/spelling/ ^\Q.github/workflows/spelling.yaml\E$ ^\Q.github/workflows/notebook_linter/run_linter.sh\E$ +^\Q.github/workflows/linter.yaml\E$ ^\Qgemini/function-calling/use_case_company_news_and_insights.ipynb\E$ ^\Qgemini/getting-started/intro_gemini_1_5_pro.ipynb\E$ ^\Qgemini/getting-started/intro_gemini_pro_vision_python.ipynb\E$ diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml index e9e1139ec1..eddd527e88 100644 --- a/.github/workflows/linter.yaml +++ b/.github/workflows/linter.yaml @@ -61,3 +61,4 @@ jobs: VALIDATE_PYTHON_ISORT: false VALIDATE_TYPESCRIPT_STANDARD: false # super-linter/super-linter#4445 VALIDATE_CHECKOV: false + VALIDATE_JAVASCRIPT_STANDARD: false From 235494f8117c50c3d8391eb10392b1ddd1e4b80f Mon Sep 17 00:00:00 2001 
From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:54:43 -0500 Subject: [PATCH 47/76] ci: update renovate.json exclude for genwealth (#1219) --- renovate.json | 1 - 1 file changed, 1 deletion(-) diff --git a/renovate.json b/renovate.json index 890d84603b..e6d6b00bd2 100644 --- a/renovate.json +++ b/renovate.json @@ -25,7 +25,6 @@ "extends": ["schedule:quarterly"] }, { - "matchDepTypes": ["all"], "excludePaths": ["gemini/sample-apps/genwealth/**"] } ], From 46459b9b0723a0f97963c745d4026f9dde827e45 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:59:04 -0500 Subject: [PATCH 48/76] ci: Remove medium.com from lychee checks (#1220) Fixes #1193 Fixes #1187 --- lychee.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/lychee.toml b/lychee.toml index eedead2d39..9dc16ad8b6 100644 --- a/lychee.toml +++ b/lychee.toml @@ -3,6 +3,7 @@ exclude = [ 'https://fonts.googleapis.com/', 'http://go/github', 'http://go/github-googlecloudplatform', + 'https://medium.com/', ] exclude_path = [ ".github/actions/spelling", From 710920c3088920161530dc337e0b85468de964f6 Mon Sep 17 00:00:00 2001 From: Katie Nguyen <21978337+katiemn@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:41:44 -0700 Subject: [PATCH 49/76] feat: image segmentation to psd file notebook (#1213) # Description This new notebook highlights a use case for the image segmentation API. The user uploads an image, then chooses up to four segmentation options. Finally, all of the various image masks are added as layers and saved as a separate PSD file that's uploaded to a Google Cloud Storage bucket. 
--------- Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> --- .github/actions/spelling/allow.txt | 1 + .../use-cases/image_segmentation_layers.ipynb | 954 ++++++++++++++++++ 2 files changed, 955 insertions(+) create mode 100644 vision/use-cases/image_segmentation_layers.ipynb diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index f4e3efaddf..084936e4cc 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -206,6 +206,7 @@ Parmar Persero Phaidon Pharma +Photoshop Pistorius Priyanka QPM diff --git a/vision/use-cases/image_segmentation_layers.ipynb b/vision/use-cases/image_segmentation_layers.ipynb new file mode 100644 index 0000000000..81602f94b7 --- /dev/null +++ b/vision/use-cases/image_segmentation_layers.ipynb @@ -0,0 +1,954 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uxCkB_DXTHzf" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hny4I-ODTIS6" + }, + "source": [ + "# Create a Photoshop Document with Image Segmentation on Vertex AI\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Run in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Run in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "|Author(s) | [Katie Nguyen](https://github.com/katiemn) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-nLS57E2TO5y" + }, + "source": [ + "## Overview\n", + "\n", + "### Image Segmentation\n", + "\n", + "Image Segmentation on Vertex AI brings Google's state of the art segmentation models to developers as a scalable and reliable service.\n", + "\n", + "With the Vertex AI Image Segmentation API, developers can choose from five different modes to segment images and build AI products.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Gb8FlC1uULsE" + }, + "source": [ + "In this tutorial, you will learn how to use the Vertex AI API to interact with the Image Segmentation model to:\n", + "\n", + "- Segment images using different modes to create image masks\n", + "- Turn those image masks to individual layers in a PSD file\n", + "- Save the PSD file to a Cloud Storage bucket" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mvKl-BtQTRiQ" + }, + "source": [ + "## Get Started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l2IULNJBtaGS" + }, + "source": [ + "### Install Vertex AI SDK for Python and Wand" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "PPmd1BWXthqG" + }, + "outputs": [], + "source": [ + "!sudo apt-get install libmagickwand-dev\n", + "\n", + "%pip install --upgrade --user --quiet google-cloud-aiplatform Wand" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ozjbBiWUuXeG" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-sW9xQiMufrZ" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2N9y_BrHUt_I" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Please wait until it is finished before continuing to the next step. ⚠️\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "opUxT_k5TdgP" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you are running this notebook on Google Colab, run the following cell to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "vbNgv4q1T2Mi" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ybBXSukZkgjg" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI API\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "q7YvbXXdtzDT" + }, + "outputs": [], + "source": [ + "from google.cloud import aiplatform\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n", + "LOCATION = \"us-central1\" # @param {type:\"string\"}\n", + "\n", + "aiplatform.init(project=PROJECT_ID, location=LOCATION)\n", + "\n", + "api_regional_endpoint = f\"{LOCATION}-aiplatform.googleapis.com\"\n", + "client_options = {\"api_endpoint\": api_regional_endpoint}\n", + "client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)\n", + "\n", + "model_endpoint = f\"projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/image-segmentation-001\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6ncLgoOYVl-b" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "bE5g6hrVWNw3" + }, + "outputs": [], + "source": [ + "import base64\n", + "import io\n", + "import math\n", + "import os\n", + "from random import randrange\n", + "import re\n", + "import typing\n", + "\n", + "import IPython\n", + "from PIL import Image as PIL_Image\n", + "from PIL import ImageOps as PIL_ImageOps\n", + "from google.cloud import storage\n", + "import ipywidgets as widgets\n", + "import matplotlib.pyplot as plt\n", + "from vertexai.preview.vision_models import Image as VertexAI_Image\n", + "from wand.image import Image as Wand_Image" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sWK6SPaAZ5hu" + }, + "source": [ + "### Define helper functions" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "Ju_PctW22NUl" + }, + "outputs": [], + "source": [ + "# Parses the mask bytes from the response and converts it to an Image PIL object\n", + "def prediction_to_mask_pil(prediction) -> PIL_Image:\n", + " encoded_mask_string = prediction[\"bytesBase64Encoded\"]\n", + " 
mask_bytes = base64.b64decode(encoded_mask_string)\n", + " mask_pil = PIL_Image.open(io.BytesIO(mask_bytes))\n", + " mask_pil.thumbnail((4096, 4096))\n", + " return mask_pil\n", + "\n", + "\n", + "# Displays a PIL image horizontally next to a generated mask from the response\n", + "def display_horizontally(input_images: list, figsize: tuple[int, int] = (20, 5)):\n", + " rows: int = math.ceil(len(input_images) / 4) # Display at most 4 images per row\n", + " cols: int = min(\n", + " len(input_images) + 1, 4\n", + " ) # Adjust columns based on the number of images\n", + " fig, axis = plt.subplots(nrows=rows, ncols=cols, figsize=figsize)\n", + "\n", + " for i, ax in enumerate(axis.flat):\n", + " if i < len(input_images):\n", + " cmap = \"gray\" if i > 0 else None\n", + " ax.imshow(input_images[i], cmap)\n", + " # Adjust the axis aspect ratio to maintain image proportions\n", + " ax.set_aspect(\"equal\")\n", + " # Disable axis ticks\n", + " ax.set_xticks([])\n", + " ax.set_yticks([])\n", + " ax.axis(\"off\")\n", + " else:\n", + " # Hide empty subplots\n", + " ax.axis(\"off\")\n", + "\n", + " # Adjust the layout to minimize whitespace between subplots.\n", + " plt.tight_layout()\n", + " plt.show()\n", + "\n", + "\n", + "def display_image(\n", + " image: VertexAI_Image,\n", + " max_width: int = 4096,\n", + " max_height: int = 4096,\n", + ") -> None:\n", + " pil_image = typing.cast(PIL_Image.Image, image._pil_image)\n", + " if pil_image.mode != \"RGB\":\n", + " # RGB is supported by all Jupyter environments (e.g. 
RGBA is not yet)\n", + " pil_image = pil_image.convert(\"RGB\")\n", + " image_width, image_height = pil_image.size\n", + " if max_width < image_width or max_height < image_height:\n", + " # Resize to display a smaller notebook image\n", + " pil_image = PIL_ImageOps.contain(pil_image, (max_width, max_height))\n", + " IPython.display.display(pil_image)\n", + "\n", + "\n", + "# Constructs a Vertex AI PredictRequest for the Image Segmentation model\n", + "def call_vertex_image_segmentation(\n", + " gcs_uri=None,\n", + " mode=\"foreground\",\n", + " prompt=None,\n", + "):\n", + " instances = []\n", + " if gcs_uri:\n", + " instances.append(\n", + " {\n", + " \"image\": {\"gcsUri\": gcs_uri},\n", + " }\n", + " )\n", + " if prompt:\n", + " instances[0][\"prompt\"] = prompt\n", + "\n", + " parameters = {\"mode\": mode}\n", + " response = client.predict(\n", + " endpoint=model_endpoint, instances=instances, parameters=parameters\n", + " )\n", + "\n", + " return response" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R45VRKWInfQQ" + }, + "source": [ + "### Select an image to segment from a Google Cloud Storage URI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "whY3dM8XEGgG" + }, + "outputs": [], + "source": [ + "file_path = \"gs://\" # @param {type:\"string\"}\n", + "\n", + "# Load the image file as Image object\n", + "image_file = VertexAI_Image.load_from_file(file_path)\n", + "display_image(image_file)\n", + "\n", + "image_file.save(\"original.png\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fU32286ooc8Q" + }, + "source": [ + "### Segment images using different modes\n", + "\n", + "You can generate image masks with different Image Segmentation features by setting the `mode` field to one of the available options:\n", + "* **Foreground**: Generate a mask of the segmented foreground of the image.\n", + "* **Background**: Generate a mask of the segmented background of the image.\n", + "* 
**Semantic**: Select the items in an image to segment from a set of 194 classes.\n", + "* **Prompt**: Use an open-vocabulary text prompt to guide the image segmentation.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mBLJtICO8iMQ" + }, + "source": [ + "### Foreground segmentation request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c9N8l0oo_cWs" + }, + "outputs": [], + "source": [ + "gcs_uri = file_path\n", + "mode = \"foreground\"\n", + "prompt = None # Prompt to guide segmentation for `semantic` and `prompt` modes\n", + "\n", + "response = call_vertex_image_segmentation(gcs_uri, mode, prompt)\n", + "\n", + "MASK_PIL = prediction_to_mask_pil(response.predictions[0])\n", + "MASK_PIL.save(\"foreground.png\")\n", + "BACKGROUND_PIL = PIL_Image.open(\"original.png\")\n", + "display_horizontally([BACKGROUND_PIL, MASK_PIL])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "--7rofOb95hT" + }, + "source": [ + "### Background segmentation request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JVtC3lFAGoAu" + }, + "outputs": [], + "source": [ + "gcs_uri = file_path\n", + "mode = \"background\"\n", + "prompt = None # Prompt to guide segmentation for `semantic` and `prompt` modes\n", + "\n", + "response = call_vertex_image_segmentation(gcs_uri, mode, prompt)\n", + "\n", + "MASK_PIL = prediction_to_mask_pil(response.predictions[0])\n", + "MASK_PIL.save(\"background.png\")\n", + "BACKGROUND_PIL = PIL_Image.open(\"original.png\")\n", + "display_horizontally([BACKGROUND_PIL, MASK_PIL])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "U9pfcnNsGtcv" + }, + "source": [ + "### Semantic segmentation request\n", + "\n", + "Specify the objects to segment from the set of 194 classes. For your convenience, the classes have been arranged into seven separate categories. Run the cell below and select your classes. 
To select multiple options from the same category, press ctrl or command and click on your selections." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "RRt9J4cS3qnt" + }, + "outputs": [], + "source": [ + "from IPython.display import display\n", + "\n", + "home_and_furniture = widgets.SelectMultiple(\n", + " options=[\n", + " \"oven\",\n", + " \"toaster\",\n", + " \"ottoman\",\n", + " \"sink\",\n", + " \"wardrobe\",\n", + " \"refrigerator\",\n", + " \"chest_of_drawers\",\n", + " \"dishwasher\",\n", + " \"bookshelf\",\n", + " \"armchair\",\n", + " \"toilet\",\n", + " \"counter_other\",\n", + " \"bathtub\",\n", + " \"bathroom_counter\",\n", + " \"shower\",\n", + " \"kitchen_island\",\n", + " \"hair_dryer\",\n", + " \"door\",\n", + " \"couch\",\n", + " \"toothbrush\",\n", + " \"light_other\",\n", + " \"lamp\",\n", + " \"sconce\",\n", + " \"nightstand\",\n", + " \"microwave\",\n", + " \"bed\",\n", + " \"ceiling\",\n", + " \"mirror\",\n", + " \"cup\",\n", + " \"shelf\",\n", + " \"knife\",\n", + " \"stairs\",\n", + " \"fork\",\n", + " \"spoon\",\n", + " \"curtain_other\",\n", + " \"cabinet\",\n", + " \"bowl\",\n", + " \"television\",\n", + " \"fireplace\",\n", + " \"tray\",\n", + " \"floor\",\n", + " \"stove\",\n", + " \"range_hood\",\n", + " \"towel\",\n", + " \"plate\",\n", + " \"rug_floormat\",\n", + " \"wall\",\n", + " \"window\",\n", + " \"washer_dryer\",\n", + " ],\n", + " value=[],\n", + " rows=5,\n", + " description=\"Home\",\n", + " disabled=False,\n", + ")\n", + "\n", + "food = widgets.SelectMultiple(\n", + " options=[\n", + " \"broccoli\",\n", + " \"carrot\",\n", + " \"hot_dog\",\n", + " \"pizza\",\n", + " \"donut\",\n", + " \"cake\",\n", + " \"fruit_other\",\n", + " \"food_other\",\n", + " \"bottle\",\n", + " \"wine_glass\",\n", + " \"banana\",\n", + " \"apple\",\n", + " \"sandwich\",\n", + " \"orange\",\n", + " ],\n", + " value=[],\n", + " rows=5,\n", + " description=\"Food\",\n", + " disabled=False,\n", + ")\n", + 
"\n", + "outdoor_and_recreation = widgets.SelectMultiple(\n", + " options=[\n", + " \"road\",\n", + " \"mountain_hill\",\n", + " \"snow\",\n", + " \"rock\",\n", + " \"sidewalk_pavement\",\n", + " \"frisbee\",\n", + " \"runway\",\n", + " \"skis\",\n", + " \"terrain\",\n", + " \"snowboard\",\n", + " \"sports_ball\",\n", + " \"baseball_bat\",\n", + " \"baseball_glove\",\n", + " \"skateboard\",\n", + " \"surfboard\",\n", + " \"tennis_racket\",\n", + " \"net\",\n", + " \"tunnel\",\n", + " \"bridge\",\n", + " \"tent\",\n", + " \"awning\",\n", + " \"river_lake\",\n", + " \"sea\",\n", + " \"bus\",\n", + " \"bench\",\n", + " \"train\",\n", + " \"bike_rack\",\n", + " \"vegetation\",\n", + " \"truck\",\n", + " \"waterfall\",\n", + " \"bicycle\",\n", + " \"trailer\",\n", + " \"sky\",\n", + " \"car\",\n", + " \"traffic_sign\",\n", + " \"boat_ship\",\n", + " \"autorickshaw\",\n", + " \"traffic_light\",\n", + " \"motorcycle\",\n", + " \"airplane\",\n", + " ],\n", + " value=[],\n", + " rows=5,\n", + " description=\"Outdoor\",\n", + " disabled=False,\n", + ")\n", + "\n", + "office_and_work = widgets.SelectMultiple(\n", + " options=[\n", + " \"storage_tank\",\n", + " \"desk\",\n", + " \"conveyor_belt\",\n", + " \"suitcase\",\n", + " \"chair_other\",\n", + " \"swivel_chair\",\n", + " \"laptop\",\n", + " \"whiteboard\",\n", + " \"keyboard\",\n", + " \"mouse\",\n", + " ],\n", + " value=[],\n", + " rows=5,\n", + " description=\"Office\",\n", + " disabled=False,\n", + ")\n", + "clothing_and_accessories = widgets.SelectMultiple(\n", + " options=[\"backpack\", \"bag\", \"tie\", \"apparel\"],\n", + " value=[],\n", + " rows=5,\n", + " description=\"Clothing\",\n", + " disabled=False,\n", + ")\n", + "\n", + "animals = widgets.SelectMultiple(\n", + " options=[\n", + " \"bird\",\n", + " \"cat\",\n", + " \"dog\",\n", + " \"horse\",\n", + " \"sheep\",\n", + " \"cow\",\n", + " \"elephant\",\n", + " \"bear\",\n", + " \"zebra\",\n", + " \"giraffe\",\n", + " \"animal_other\",\n", + " ],\n", + " 
value=[],\n", + " rows=5,\n", + " description=\"Animals\",\n", + " disabled=False,\n", + ")\n", + "\n", + "miscellaneous = widgets.SelectMultiple(\n", + " options=[\n", + " \"pool_table\",\n", + " \"umbrella\",\n", + " \"barrel\",\n", + " \"case\",\n", + " \"book\",\n", + " \"crib\",\n", + " \"box\",\n", + " \"kite\",\n", + " \"basket\",\n", + " \"clock\",\n", + " \"fan\",\n", + " \"vase\",\n", + " \"scissors\",\n", + " \"plaything_other\",\n", + " \"stool\",\n", + " \"teddy_bear\",\n", + " \"seat\",\n", + " \"base\",\n", + " \"trash_can\",\n", + " \"painting\",\n", + " \"sculpture\",\n", + " \"pier_wharf\",\n", + " \"potted_plant\",\n", + " \"poster\",\n", + " \"column\",\n", + " \"bulletin_board\",\n", + " \"fountain\",\n", + " \"building\",\n", + " \"chandelier\",\n", + " \"radiator\",\n", + " \"table\",\n", + " \"stage\",\n", + " \"arcade_machine\",\n", + " \"banner\",\n", + " \"gravel\",\n", + " \"flag\",\n", + " \"platform\",\n", + " \"blanket\",\n", + " \"remote\",\n", + " \"escalator\",\n", + " \"playingfield\",\n", + " \"cell phone\",\n", + " \"railroad\",\n", + " \"shower_curtain\",\n", + " \"fire_hydrant\",\n", + " \"pillow\",\n", + " \"parking_meter\",\n", + " \"road_barrier\",\n", + " \"water_other\",\n", + " \"mailbox\",\n", + " \"swimming_pool\",\n", + " \"person\",\n", + " \"cctv_camera\",\n", + " \"billboard\",\n", + " \"rider_other\",\n", + " \"junction_box\",\n", + " \"bicyclist\",\n", + " \"pole\",\n", + " \"motorcyclist\",\n", + " \"slow_wheeled_object\",\n", + " \"fence\",\n", + " \"window_blind\",\n", + " \"paper\",\n", + " \"streetlight\",\n", + " \"railing_banister\",\n", + " \"guard_rail\",\n", + " ],\n", + " value=[],\n", + " rows=5,\n", + " description=\"Miscellaneous\",\n", + " disabled=False,\n", + ")\n", + "\n", + "display(home_and_furniture)\n", + "display(food)\n", + "display(outdoor_and_recreation)\n", + "display(office_and_work)\n", + "display(clothing_and_accessories)\n", + "display(animals)\n", + "display(miscellaneous)" + ] + 
}, + { + "cell_type": "markdown", + "metadata": { + "id": "m3PX4mOOecgf" + }, + "source": [ + "Combine all your segmentation class selections into a single string for the request." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NxqwxEef_sej" + }, + "outputs": [], + "source": [ + "item_string = \",\".join(\n", + " home_and_furniture.value\n", + " + food.value\n", + " + outdoor_and_recreation.value\n", + " + office_and_work.value\n", + " + clothing_and_accessories.value\n", + " + animals.value\n", + " + miscellaneous.value\n", + ")\n", + "print(item_string)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BVDUNa7UevoN" + }, + "source": [ + "Regardless of the number of classes, a semantic segmentation request will return a single image mask with all detected items from the request." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Aar3Pn3yG75T" + }, + "outputs": [], + "source": [ + "gcs_uri = file_path\n", + "mode = \"semantic\"\n", + "prompt = item_string\n", + "\n", + "response = call_vertex_image_segmentation(gcs_uri, mode, prompt)\n", + "\n", + "MASK_PIL = prediction_to_mask_pil(response.predictions[0])\n", + "MASK_PIL.save(\"semantic.png\")\n", + "BACKGROUND_PIL = PIL_Image.open(\"original.png\")\n", + "display_horizontally([BACKGROUND_PIL, MASK_PIL])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CO4q2sacIydg" + }, + "source": [ + "### Open vocabulary segmentation request\n", + "\n", + "Provide a prompt to guide the image segmentation. Unlike other modes, an open vocabulary request will produce multiple image masks based on the prompt." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "unrHSqhqHmlI" + }, + "outputs": [], + "source": [ + "# Delete local prompt based masks from previous runs\n", + "pattern = re.compile(\"prompt*\")\n", + "for file in os.listdir(\".\"):\n", + " if pattern.match(file):\n", + " os.remove(file)\n", + "\n", + "gcs_uri = file_path\n", + "mode = \"prompt\"\n", + "prompt = \"[your-prompt]\" # @param {type:\"string\"}\n", + "\n", + "response = call_vertex_image_segmentation(gcs_uri, mode, prompt)\n", + "\n", + "BACKGROUND_PIL = PIL_Image.open(\"original.png\")\n", + "images = [BACKGROUND_PIL]\n", + "for i in range(len(response.predictions)):\n", + " MASK_PIL = prediction_to_mask_pil(response.predictions[i])\n", + " MASK_PIL.save(\"prompt\" + str(i) + \".png\")\n", + " images.append(MASK_PIL)\n", + "\n", + "display_horizontally(images)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UrRBCQ4lf78V" + }, + "source": [ + "### Select masks to apply to PSD file\n", + "\n", + "Run the following cell to generate a checklist of all possible segmentation modes you may have previously generated. Then, select all modes you would like to be included in the final PSD file. All of the specified image masks will be included as separate layers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FPMmGO5TFLYG" + }, + "outputs": [], + "source": [ + "from IPython.display import display\n", + "\n", + "foreground_checkbox = widgets.Checkbox(\n", + " value=True, description=\"Foreground Mask\", disabled=False\n", + ")\n", + "background_checkbox = widgets.Checkbox(\n", + " value=True, description=\"Background Mask\", disabled=False\n", + ")\n", + "semantic_checkbox = widgets.Checkbox(\n", + " value=True, description=\"Semantic Mask\", disabled=False\n", + ")\n", + "prompt_checkbox = widgets.Checkbox(\n", + " value=True, description=\"Prompt Mask\", disabled=False\n", + ")\n", + "\n", + "\n", + "display(foreground_checkbox)\n", + "display(background_checkbox)\n", + "display(semantic_checkbox)\n", + "display(prompt_checkbox)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rFAh_A01gc_G" + }, + "source": [ + "### Add selected mask images as layers\n", + "\n", + "Once the layers are added, you will save the final PSD file." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "VXCM_QOz3Uln" + }, + "outputs": [], + "source": [ + "with Wand_Image(filename=\"original.png\") as img:\n", + " img.read(filename=\"original.png\")\n", + "\n", + " if foreground_checkbox.value:\n", + " img.read(filename=\"foreground.png\")\n", + " if background_checkbox.value:\n", + " img.read(filename=\"background.png\")\n", + " if semantic_checkbox.value:\n", + " img.read(filename=\"semantic.png\")\n", + " if prompt_checkbox.value:\n", + " pattern = re.compile(\"prompt*\")\n", + " for file in os.listdir(\".\"):\n", + " if pattern.match(file):\n", + " img.read(filename=file)\n", + "\n", + " img.save(filename=\"output.psd\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "C--fF0Wjg8Dp" + }, + "source": [ + "### Upload the PSD file to Google Cloud Storage bucket" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LW4VpNDsemLm" + }, + "outputs": [], + "source": [ + "prefix = \"psd_\" + str(randrange(10000, 100000))\n", + "bucket_name = \"[your-bucket-name]\" # @param {type:\"string\"}\n", + "\n", + "storage_client = storage.Client()\n", + "bucket = storage_client.bucket(bucket_name)\n", + "blob = bucket.blob(prefix)\n", + "\n", + "blob.upload_from_filename(\"output.psd\")\n", + "print(\"Uploaded \" + prefix)" + ] + } + ], + "metadata": { + "colab": { + "name": "image_segmentation_layers.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From fd77968adb0ee3fb91413477bd33c2e7bc47960b Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Tue, 8 Oct 2024 18:05:52 +0200 Subject: [PATCH 50/76] fix: Libraries import and quota banner (#1221) # Description A quick fix for libraries import and quota banner to prevent quota error. Thank you for opening a Pull Request! 
Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .../vertex_ai_prompt_optimizer_sdk.ipynb | 31 ++++++++++--------- ...i_prompt_optimizer_sdk_custom_metric.ipynb | 13 ++++++-- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb index 762ec120c5..f1bd611ed3 100644 --- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb @@ -361,7 +361,9 @@ "- `Storage Object Admin` to read and write to your GCS bucket.\n", "- `Artifact Registry Reader` to download the pipeline template from Artifact Registry.\n", "\n", - "[Check out the documentation](https://cloud.google.com/iam/docs/manage-access-service-accounts#iam-view-access-sa-gcloud) to know how to grant those permissions to a single service account.\n" + "[Check out the documentation](https://cloud.google.com/iam/docs/manage-access-service-accounts#iam-view-access-sa-gcloud) to know how to grant those permissions to a single service account. \n", + "\n", + "**Important**: If you run the following commands using Vertex AI Workbench, please run them directly in the terminal. " ] }, { @@ -383,17 +385,12 @@ }, "outputs": [], "source": [
- "! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", - " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", - " --role=roles/aiplatform.user\n", - "\n", - "! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", - " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", - " --role=roles/storage.objectAdmin\n", - "\n", - "! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", - " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", - " --role=roles/artifactregistry.reader" + "for role in ['aiplatform.user', 'storage.objectAdmin', 'artifactregistry.reader']:\n", + " \n", + " ! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", + " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", + " --role=roles/{role} \\\n", + " --condition=None" ] }, { @@ -480,7 +477,7 @@ "\n", "from google.cloud import aiplatform\n", "import pandas as pd\n", - "from utils.helpers import (\n", + "from tutorial.utils.helpers import (\n", " async_generate,\n", " display_eval_report,\n", " evaluate_task,\n", @@ -908,7 +905,11 @@ "source": [ "#### Run the automatic prompt optimization job\n", "\n", - "Now you are ready to run your first Vertex AI Prompt Optimizer (Preview) job using the Vertex AI SDK for Python.\n" + "Now you are ready to run your first Vertex AI Prompt Optimizer (Preview) job using the Vertex AI SDK for Python.\n", + "\n", + "**Important:** Be sure you have provisioned enough queries per minute (QPM) quota and the recommended QPM for each model. If you configure the Vertex AI prompt optimizer with a QPM that is higher than the QPM that you have access to, the job will fail. \n", + "\n", + "[Check out](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#before-you-begin) the documentation to know more. \n"
 ] }, { @@ -937,7 +938,7 @@ " worker_pool_specs=WORKER_POOL_SPECS,\n", ")\n", "\n", - "custom_job.run()" + "custom_job.run(service_account=SERVICE_ACCOUNT)" ] }, { diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb index fa8b39fca6..3fbdbf2bac 100644 --- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb @@ -372,7 +372,9 @@ "- `Artifact Registry Reader` to download the pipeline template from Artifact Registry.\n", "- `Cloud Run Developer` to deploy function on Cloud Run.\n", "\n", - "[Check out the documentation](https://cloud.google.com/iam/docs/manage-access-service-accounts#iam-view-access-sa-gcloud) to know how to grant those permissions to a single service account.\n" + "[Check out the documentation](https://cloud.google.com/iam/docs/manage-access-service-accounts#iam-view-access-sa-gcloud) to know how to grant those permissions to a single service account.\n", + "\n", + "**Important**: If you run the following commands using Vertex AI Workbench, please run them directly in the terminal. \n" ] }, { @@ -398,7 +400,8 @@ " \n", " ! gcloud projects add-iam-policy-binding {PROJECT_ID} \\\n", " --member=serviceAccount:{SERVICE_ACCOUNT} \\\n", - " --role=roles/{role}" + " --role=roles/{role} \\\n", + " --condition=None" ] }, { @@ -1146,7 +1149,11 @@ "source": [ "#### Run the automatic prompt optimization job\n", "\n", - "Now you are ready to run your first Vertex AI Prompt Optimizer (Preview) job using the Vertex AI SDK for Python.\n" + "Now you are ready to run your first Vertex AI Prompt Optimizer (Preview) job using the Vertex AI SDK for Python.\n", + "\n", + "**Important:** Be sure you have provisioned enough queries per minute (QPM) quota and the recommended QPM for each model. 
If you configure the Vertex AI prompt optimizer with a QPM that is higher than the QPM that you have access to, the job will fail. \n", + "\n", + "[Check out](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/prompt-optimizer#before-you-begin) the documentation to know more. " ] }, { From 60c25678def4a051aa093f88a09addf3636db5b5 Mon Sep 17 00:00:00 2001 From: Ivan Nardini <88703814+inardini@users.noreply.github.com> Date: Tue, 8 Oct 2024 18:44:18 +0200 Subject: [PATCH 51/76] feat: Adding Chirp 2 notebook with advanced features (#1226) # Description In this tutorial, you learn about how to use Chirp 2, the latest generation of Google's multilingual ASR-specific models, and its new features, including word-level timestamps, model adaptation, and speech translation. Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot Co-authored-by: Eric Dong --- .../get_started_with_chirp_2_sdk.ipynb | 1 - ...et_started_with_chirp_2_sdk_features.ipynb | 1013 +++++++++++++++++ .../use-cases/image_segmentation_layers.ipynb | 2 + 3 files changed, 1015 insertions(+), 1 deletion(-) create mode 100644 audio/speech/getting-started/get_started_with_chirp_2_sdk_features.ipynb diff --git a/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb b/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb index 4976608f8b..02101cfe01 100644 --- a/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb +++ b/audio/speech/getting-started/get_started_with_chirp_2_sdk.ipynb @@ -1041,7 +1041,6 @@ "outputs": [], "source": [ "real_time_request = cloud_speech.RecognizeRequest(\n", - " recognizer=f\"projects/{PROJECT_ID}/locations/{LOCATION}/recognizers/_\",\n", " config=real_time_config,\n", " content=short_audio_sample_bytes,\n", " recognizer=RECOGNIZER,\n", diff --git a/audio/speech/getting-started/get_started_with_chirp_2_sdk_features.ipynb b/audio/speech/getting-started/get_started_with_chirp_2_sdk_features.ipynb new file mode 100644 index 0000000000..d86baa61ce --- /dev/null +++ b/audio/speech/getting-started/get_started_with_chirp_2_sdk_features.ipynb @@ -0,0 +1,1013 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Get started with Chirp 2 - Advanced features\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Ivan Nardini](https://github.com/inardini) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## Overview\n", + "\n", + "In this tutorial, you learn about how to use [Chirp 2](https://cloud.google.com/speech-to-text/v2/docs/chirp_2-model), the latest generation of Google's multilingual ASR-specific models, and its new features, including word-level timestamps, model adaptation, and speech translation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Speech-to-Text SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "e73_ZgKWYedz" + }, + "outputs": [], + "source": [ + "! apt update -y -qq\n", + "! apt install ffmpeg -y -qq" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "%pip install --quiet 'google-cloud-speech' 'protobuf<4.21' 'google-auth==2.27.0' 'pydub' 'etils' 'jiwer' 'ffmpeg-python' 'plotly' 'gradio'" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Speech-to-Text V2 SDK\n", + "\n", + "To get started using the Speech-to-Text API, you must have an existing Google Cloud project and [enable the Speech-to-Text API](https://console.cloud.google.com/flows/enableapi?apiid=speech.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WIQyBhAn_9tK" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "from google.api_core.client_options import ClientOptions\n", + "from google.cloud.speech_v2 import SpeechClient\n", + "\n", + "API_ENDPOINT = f\"{LOCATION}-speech.googleapis.com\"\n", + "\n", + "client = SpeechClient(\n", + " client_options=ClientOptions(\n", + " api_endpoint=API_ENDPOINT,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "from google.cloud.speech_v2.types import cloud_speech\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qqm0OQpAYCph" + }, + "outputs": [], + "source": [ + "import io\n", + "import os\n", + "\n", + "import IPython.display as ipd\n", + "from etils import epath as ep\n", + "from pydub import AudioSegment" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sP8GBj3tBAC1" + }, + "source": [ + "### Set constants" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rXTVeU1uBBqY" + }, + "outputs": [], + "source": [ + "INPUT_AUDIO_SAMPLE_FILE_URI = (\n", + " \"gs://github-repo/audio_ai/speech_recognition/attention_is_all_you_need_podcast.wav\"\n", + ")\n", + "\n", + "RECOGNIZER = client.recognizer_path(PROJECT_ID, 
LOCATION, \"_\")\n", + "\n", + "MAX_CHUNK_SIZE = 25600" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "djgFxrGC_Ykd" + }, + "source": [ + "### Helpers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Zih8W_wC_caW" + }, + "outputs": [], + "source": [ + "def read_audio_file(audio_file_path: str) -> bytes:\n", + " \"\"\"\n", + " Read audio file as bytes.\n", + " \"\"\"\n", + " if audio_file_path.startswith(\"gs://\"):\n", + " with ep.Path(audio_file_path).open(\"rb\") as f:\n", + " audio_bytes = f.read()\n", + " else:\n", + " with open(audio_file_path, \"rb\") as f:\n", + " audio_bytes = f.read()\n", + " return audio_bytes\n", + "\n", + "\n", + "def save_audio_sample(audio_bytes: bytes, output_file_uri: str) -> None:\n", + " \"\"\"\n", + " Save audio sample as a file in Google Cloud Storage.\n", + " \"\"\"\n", + "\n", + " output_file_path = ep.Path(output_file_uri)\n", + " if not output_file_path.parent.exists():\n", + " output_file_path.parent.mkdir(parents=True, exist_ok=True)\n", + "\n", + " with output_file_path.open(\"wb\") as f:\n", + " f.write(audio_bytes)\n", + "\n", + "\n", + "def extract_audio_sample(audio_bytes: bytes, duration: int) -> bytes:\n", + " \"\"\"\n", + " Extracts a random audio sample of a given duration from an audio file.\n", + " \"\"\"\n", + " audio = AudioSegment.from_file(io.BytesIO(audio_bytes))\n", + " start_time = 0\n", + " audio_sample = audio[start_time : start_time + duration * 1000]\n", + "\n", + " audio_bytes = io.BytesIO()\n", + " audio_sample.export(audio_bytes, format=\"wav\")\n", + " audio_bytes.seek(0)\n", + "\n", + " return audio_bytes.read()\n", + "\n", + "\n", + "def play_audio_sample(audio_bytes: bytes) -> None:\n", + " \"\"\"\n", + " Plays the audio sample in a notebook.\n", + " \"\"\"\n", + " audio_file = io.BytesIO(audio_bytes)\n", + " ipd.display(ipd.Audio(audio_file.read(), rate=44100))\n", + "\n", + "\n", + "def parse_real_time_recognize_response(response) -> 
list[tuple[str, int]]:\n", + " \"\"\"Parse real-time responses from the Speech-to-Text API\"\"\"\n", + " real_time_recognize_results = []\n", + " for result in response.results:\n", + " real_time_recognize_results.append(\n", + " (result.alternatives[0].transcript, result.result_end_offset)\n", + " )\n", + " return real_time_recognize_results\n", + "\n", + "\n", + "def parse_words_real_time_recognize_response(response):\n", + " \"\"\"\n", + " Parse the word-level results from a real-time speech recognition response.\n", + " \"\"\"\n", + " real_time_recognize_results = []\n", + " for result in response.results:\n", + " for word_info in result.alternatives[0].words:\n", + " word = word_info.word\n", + " start_time = word_info.start_offset.seconds\n", + " end_time = word_info.end_offset.seconds\n", + " real_time_recognize_results.append(\n", + " {\"word\": word, \"start\": start_time, \"end\": end_time}\n", + " )\n", + " return real_time_recognize_results\n", + "\n", + "\n", + "def print_transcription(\n", + " audio_sample_bytes: bytes, transcriptions: str, play_audio=True\n", + ") -> None:\n", + " \"\"\"Prettify the play of the audio and the associated print of the transcription text in a notebook\"\"\"\n", + "\n", + " if play_audio:\n", + " # Play the audio sample\n", + " display(ipd.HTML(\"Audio:\"))\n", + " play_audio_sample(audio_sample_bytes)\n", + " display(ipd.HTML(\"
\"))\n", + "\n", + " # Display the transcription text\n", + " display(ipd.HTML(\"Transcription:\"))\n", + " for transcription, _ in transcriptions:\n", + " formatted_text = f\"
{transcription}
\"\n", + " display(ipd.HTML(formatted_text))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y4MO5i9X4yq3" + }, + "source": [ + "### Prepare audio samples\n", + "\n", + "The podcast audio is ~ 8 mins. Depending on the audio length, you can use different transcribe API methods. To learn more, check out the official documentation. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4uTeBXo6dZlS" + }, + "source": [ + "#### Read the audio file\n", + "\n", + "Let's start reading the input audio sample you want to transcribe.\n", + "\n", + "In this case, it is a podcast generated with NotebookLM about the \"Attention is all you need\" [paper](https://arxiv.org/abs/1706.03762)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pjzwMWqpdldM" + }, + "outputs": [], + "source": [ + "input_audio_bytes = read_audio_file(INPUT_AUDIO_SAMPLE_FILE_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cIAl9Lyd4niN" + }, + "source": [ + "#### Prepare a short audio sample (< 1 min)\n", + "\n", + "Extract a short audio sample from the original one for streaming and real-time audio processing." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MofmWRSH4niO" + }, + "outputs": [], + "source": [ + "short_audio_sample_bytes = extract_audio_sample(input_audio_bytes, 30)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AC2YeY7v4niO" + }, + "outputs": [], + "source": [ + "play_audio_sample(short_audio_sample_bytes)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VPVDNRyVxquo" + }, + "source": [ + "## Improve transcription using Chirp 2's word-timing and speech adaptation features\n", + "\n", + "Chirp 2 supports word-level timestamps for each transcribed word and speech adaptation to help the model improving recognition accuracy for specific terms or proper nouns." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oYCgDay2hAgB" + }, + "source": [ + "### Perform real-time speech recognition with word-timing" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F83r9aiNhAgD" + }, + "source": [ + "#### Define real-time recognition configuration with `enable_word_time_offsets` parameter.\n", + "\n", + "You define the real-time recognition configuration which allows you to set the model to use, language code of the audio and more.\n", + "\n", + "In this case, you enable word timing feature. When True, the top result includes a list of words and the start and end time offsets (timestamps) for those words." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "j0WprZ-phAgD" + }, + "outputs": [], + "source": [ + "wt_real_time_config = cloud_speech.RecognitionConfig(\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + " language_codes=[\"en-US\"],\n", + " model=\"chirp_2\",\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_word_time_offsets=True,\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "r2TqksAqhAgD" + }, + "source": [ + "#### Define the real-time request configuration\n", + "\n", + "Next, you define the real-time request passing the configuration and the audio sample you want to transcribe.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nh55mSzXhAgD" + }, + "outputs": [], + "source": [ + "wt_real_time_request = cloud_speech.RecognizeRequest(\n", + " config=wt_real_time_config, content=short_audio_sample_bytes, recognizer=RECOGNIZER\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "817YXVBli0aY" + }, + "source": [ + "#### Run the real-time recognition request\n", + "\n", + "Finally you submit the real-time recognition request." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rc0cBrVsi7UG" + }, + "outputs": [], + "source": [ + "wt_response = client.recognize(request=wt_real_time_request)\n", + "wt_real_time_recognize_results = parse_real_time_recognize_response(wt_response)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J2vpMSv7CZ_2" + }, + "source": [ + "And you use a helper function to visualize transcriptions and the associated streams." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ezH51rLH4CBR" + }, + "outputs": [], + "source": [ + "for transcription, _ in wt_real_time_recognize_results:\n", + " print_transcription(short_audio_sample_bytes, transcription)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iFhUcPcO-Zeh" + }, + "source": [ + "#### Visualize word timings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "mhH42sab-3Tg" + }, + "outputs": [], + "source": [ + "n = 10\n", + "word_timings = parse_words_real_time_recognize_response(wt_response)\n", + "for word_info in word_timings[:n]:\n", + " print(\n", + " f\"Word: {word_info['word']} - Start: {word_info['start']} sec - End: {word_info['end']} sec\"\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IFOq3SK0qOT_" + }, + "source": [ + "### Improve real-time speech recognition accuracy with model adaptation\n", + "\n", + "So far, Chirp 2 transcribes the podcast correctly. That's in part because podcasts are recorded in ideal enviroments like a recording studio. But that's not always the case. For example, suppose that your audio data is recorded in noisy environment or the recording has strong accents or someone speaks quickly.\n", + "\n", + "To handle this and many other scenarios and improve real-time speech recognition accuracy, you can use model adaptation. 
To enable model adaptation with Chirp 2, you use the `adaptation` parameter.\n", + "\n", + "With `adaptation` parameter, you provide \"hints\" to the speech recognizer to favor specific words and phrases (`AdaptationPhraseSet` class) in the results. And for each hint you can define a hint boost which is the probability that a specific word or phrase will be recognized over other similar sounding phrases. Be careful to use higher boost. Higher the boost, higher is the chance of false positive recognition as well. We recommend using a binary search approach to finding the optimal value for your use case as well as adding phrases both with and without boost to your requests.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1--AAmsYq-vG" + }, + "source": [ + "#### Define real-time recognition configuration with `adaptation` parameter\n", + "\n", + "You define a new real-time recognition configuration which includes the `adaptation` configuration.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qUr76NRcq-vH" + }, + "outputs": [], + "source": [ + "adaptation_real_time_config = cloud_speech.RecognitionConfig(\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + " language_codes=[\"en-US\"],\n", + " model=\"chirp_2\",\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + " adaptation=cloud_speech.SpeechAdaptation(\n", + " phrase_sets=[\n", + " cloud_speech.SpeechAdaptation.AdaptationPhraseSet(\n", + " inline_phrase_set=cloud_speech.PhraseSet(\n", + " phrases=[\n", + " {\n", + " \"value\": \"you know\", # often mumbled or spoken quickly\n", + " \"boost\": 10.0,\n", + " },\n", + " {\n", + " \"value\": \"what are they called again?\" # hesitations and changes in intonation\n", + " },\n", + " {\n", + " \"value\": \"Yeah, it's wild.\" # short interjections have brevity and the emotional inflection\n", + " },\n", + " ]\n", + " )\n", + " 
)\n", + " ]\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2Lm8e-2Cq-vH" + }, + "source": [ + "#### Define the real-time request configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pH9ZxWFIq-vH" + }, + "outputs": [], + "source": [ + "adaptation_real_time_request = cloud_speech.RecognizeRequest(\n", + " config=adaptation_real_time_config,\n", + " content=short_audio_sample_bytes,\n", + " recognizer=RECOGNIZER,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xaQSQMZZq-vH" + }, + "source": [ + "#### Run the real-time recognition request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "KYiCZjDWq-vH" + }, + "outputs": [], + "source": [ + "adapted_response = client.recognize(request=adaptation_real_time_request)\n", + "adapted_real_time_recognize_results = parse_real_time_recognize_response(\n", + " adapted_response\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xOjvJHHmq-vH" + }, + "source": [ + "And you use a helper function to visualize transcriptions and the associated streams." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7Xipn8H4q-vH" + }, + "outputs": [], + "source": [ + "for transcription, _ in adapted_real_time_recognize_results:\n", + " print_transcription(short_audio_sample_bytes, transcription)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "s0sIpQmJr40s" + }, + "source": [ + "## Transcript and translate using language-agnostic transcription and language translation\n", + "\n", + "Chirp 2 supports language-agnostic audio transcription and language translation. 
This means that Chirp 2 is capable of recognizing the language of the input audio and, at the same time, translate the outcome transcription in many different language.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5QpacKEDRStJ" + }, + "source": [ + "#### Define real-time recognition configuration with `language_code` and `translationConfig` parameters.\n", + "\n", + "You define a real-time recognition configuration by setting language codes in both `language_codes` and `translationConfig` parameters :\n", + "\n", + "* When `language_codes=[\"auto\"]`, you enable language-agnostic transcription to auto to detect language.\n", + "\n", + "* When `target_language=language_code` where `language_code` is one of the language in this list but different from the original language, you enable language translation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9vaW49XqUD2v" + }, + "outputs": [], + "source": [ + "target_language_code = \"ca-ES\" # @param {type:\"string\", isTemplate: true}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "y3Z_vaKpRStK" + }, + "outputs": [], + "source": [ + "ts_real_time_config = cloud_speech.RecognitionConfig(\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + " language_codes=[\"en-US\"],\n", + " translation_config=cloud_speech.TranslationConfig(\n", + " target_language=target_language_code\n", + " ),\n", + " model=\"chirp_2\",\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nPGCDLWARStK" + }, + "source": [ + "#### Define the real-time request configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5_spCiHDRStK" + }, + "outputs": [], + "source": [ + "ts_real_time_request = cloud_speech.RecognizeRequest(\n", + " config=ts_real_time_config, 
content=short_audio_sample_bytes, recognizer=RECOGNIZER\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Mzr69sLjRStK" + }, + "source": [ + "#### Run the real-time recognition request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NvcPOWLkRStK" + }, + "outputs": [], + "source": [ + "ts_response = client.recognize(request=ts_real_time_request)\n", + "ts_real_time_recognize_results = parse_real_time_recognize_response(ts_response)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "STjng1ZiRStK" + }, + "source": [ + "And you use a helper function to visualize transcriptions and the associated streams." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "EhTgg3qwRStL" + }, + "outputs": [], + "source": [ + "print_transcription(short_audio_sample_bytes, transcription, play_audio=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_MkA144iQrAn" + }, + "source": [ + "## Chirp 2 playground\n", + "\n", + "To play with Chirp 2, you can create a simple Gradio application where you enable several Chirp 2 features.\n", + "\n", + "Below you have an example for language-agnostic transcription and language translation with Chirp 2.\n", + "\n", + "To know more, check out the official documentation [here](https://cloud.google.com/speech-to-text/v2/docs/chirp_2-model).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WjuuZHTbQwkF" + }, + "outputs": [], + "source": [ + "def transcribe_audio(audio, enable_translation, target_language_code):\n", + " \"\"\"Transcribe the given audio file with optional features.\"\"\"\n", + "\n", + " # Set variables\n", + " project_id = os.environ.get(\"GOOGLE_CLOUD_PROJECT\", PROJECT_ID)\n", + " location = os.environ.get(\"GOOGLE_CLOUD_REGION\", LOCATION)\n", + " api_endpoint = f\"{location}-speech.googleapis.com\"\n", + "\n", + " # initiate client\n", + " client = 
SpeechClient(\n", + " client_options=ClientOptions(\n", + " api_endpoint=api_endpoint,\n", + " )\n", + " )\n", + "\n", + " # read the audio\n", + " with open(audio, \"rb\") as audio_file:\n", + " content = audio_file.read()\n", + "\n", + " # define language agnostic real time recognition configuration\n", + " real_time_config = cloud_speech.RecognitionConfig(\n", + " model=\"chirp_2\",\n", + " language_codes=[\"auto\"],\n", + " features=cloud_speech.RecognitionFeatures(\n", + " enable_automatic_punctuation=True,\n", + " ),\n", + " auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n", + " )\n", + "\n", + " if enable_translation:\n", + " real_time_config.language_codes = [\"en-US\"]\n", + " real_time_config.translation_config = cloud_speech.TranslationConfig(\n", + " target_language=target_language_code\n", + " )\n", + "\n", + " # define real-time recognition request\n", + " recognizer = client.recognizer_path(project_id, location, \"_\")\n", + "\n", + " real_time_request = cloud_speech.RecognizeRequest(\n", + " config=real_time_config,\n", + " content=content,\n", + " recognizer=recognizer,\n", + " )\n", + "\n", + " response = client.recognize(request=real_time_request)\n", + "\n", + " full_transcript = \"\"\n", + " for result in response.results:\n", + " full_transcript += result.alternatives[0].transcript + \" \"\n", + " return full_transcript.strip()\n", + "\n", + "\n", + "def speech_to_text(audio, enable_translation=False, target_language_code=None):\n", + " if audio is None:\n", + " return \"\"\n", + "\n", + " text = transcribe_audio(audio, enable_translation, target_language_code)\n", + " return text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HQzUnSkErDTN" + }, + "outputs": [], + "source": [ + "# Create Gradio interface\n", + "demo = gr.Interface(\n", + " fn=speech_to_text,\n", + " inputs=[\n", + " gr.Audio(type=\"filepath\", label=\"Audio input\"),\n", + " gr.Checkbox(label=\"🧠 Enable 
Translation\"),\n", + " gr.Dropdown(\n", + " label=\"Select language to translate\",\n", + " choices=[\"ca-ES\", \"cy-GB\", \"de-DE\", \"ja-JP\", \"zh-Hans-CN\"],\n", + " interactive=True,\n", + " multiselect=False,\n", + " ),\n", + " ],\n", + " outputs=[gr.Textbox(label=\"📄 Transcription\")],\n", + " title=\"Chirp 2 Playground\",\n", + " description=\"

Speak or pass an audio and get the transcription!

\",\n", + ")\n", + "\n", + "# Launch the app\n", + "demo.launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XqTpn06QrEiZ" + }, + "outputs": [], + "source": [ + "demo.close()" + ] + } + ], + "metadata": { + "colab": { + "name": "get_started_with_chirp_2_sdk_features.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/vision/use-cases/image_segmentation_layers.ipynb b/vision/use-cases/image_segmentation_layers.ipynb index 81602f94b7..f72be454b9 100644 --- a/vision/use-cases/image_segmentation_layers.ipynb +++ b/vision/use-cases/image_segmentation_layers.ipynb @@ -278,6 +278,8 @@ "outputs": [], "source": [ "# Parses the mask bytes from the response and converts it to an Image PIL object\n", + "\n", + "\n", "def prediction_to_mask_pil(prediction) -> PIL_Image:\n", " encoded_mask_string = prediction[\"bytesBase64Encoded\"]\n", " mask_bytes = base64.b64decode(encoded_mask_string)\n", From dc3700493e921389ea1678653cdba5ef48205958 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 9 Oct 2024 16:15:04 +0200 Subject: [PATCH 52/76] chore(deps): update lycheeverse/lychee-action action to v2 (#1228) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [lycheeverse/lychee-action](https://redirect.github.com/lycheeverse/lychee-action) | action | major | `v1` -> `v2` | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
lycheeverse/lychee-action (lycheeverse/lychee-action) ### [`v2`](https://redirect.github.com/lycheeverse/lychee-action/compare/v1...v2) [Compare Source](https://redirect.github.com/lycheeverse/lychee-action/compare/v1...v2)
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> --- .github/workflows/links.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/links.yaml b/.github/workflows/links.yaml index 5db7bd015d..1ffd417dba 100644 --- a/.github/workflows/links.yaml +++ b/.github/workflows/links.yaml @@ -14,7 +14,7 @@ jobs: - name: Link Checker id: lychee - uses: lycheeverse/lychee-action@v1 + uses: lycheeverse/lychee-action@v2 - name: Create Issue From File if: env.lychee_exit_code != 0 From ca5a50624968d665075b3e48db803f55a9a22eec Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Thu, 10 Oct 2024 10:33:18 +0800 Subject: [PATCH 53/76] New notebook on fine-tuning gemini 1.5 flash for qa (#1229) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. - [X] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. 
- [X] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [X] Appropriate docs were updated (if necessary) Fixes # 🦕 --- ...sed_finetuning_using_gemini_qa_ipynb.ipynb | 1683 +++++++++++++++++ 1 file changed, 1683 insertions(+) create mode 100644 gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb diff --git a/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb new file mode 100644 index 0000000000..2e8ccc4bfd --- /dev/null +++ b/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb @@ -0,0 +1,1683 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ojoyvz6mH1Hv" + }, + "source": [ + "# Supervised Fine Tuning with Gemini 1.5 Flash for Q&A\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Erwin Huizenga](https://github.com/erwinh85) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "## Overview\n", + "\n", + "**Gemini** is a family of generative AI models developed by Google DeepMind designed for multimodal use cases. The Gemini API gives you access to the various Gemini models, such as Gemini 1.5 Pro and Gemini 1.5 Flash.\n", + "This notebook demonstrates fine-tuning the Gemini 1.5 Flahs using the Vertex AI Supervised Tuning feature. Supervised Tuning allows you to use your own labeled training data to further refine the base model's capabilities toward your specific tasks.\n", + "Supervised Tuning uses labeled examples to tune a model. Each example demonstrates the output you want from your text model during inference.\n", + "First, ensure your training data is of high quality, well-labeled, and directly relevant to the target task. This is crucial as low-quality data can adversely affect the performance and introduce bias in the fine-tuned model.\n", + "Training: Experiment with different configurations to optimize the model's performance on the target task.\n", + "Evaluation:\n", + "Metric: Choose appropriate evaluation metrics that accurately reflect the success of the fine-tuned model for your specific task\n", + "Evaluation Set: Use a separate set of data to evaluate the model's performance\n", + "\n", + "\n", + "Refer to public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning) for more details.\n", + "\n", + "
\n", + "\n", + "Before running this notebook, ensure you have:\n", + "\n", + "- A Google Cloud project: Provide your project ID in the `PROJECT_ID` variable.\n", + "\n", + "- Authenticated your Colab environment: Run the authentication code block at the beginning.\n", + "\n", + "- Prepared training data (Test with your own data or use the one in the notebook): Data should be formatted in JSONL with prompts and corresponding completions." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f7SS5pzuIA-1" + }, + "source": [ + "### Costs\n", + "\n", + "This tutorial uses billable components of Google Cloud:\n", + "\n", + "* Vertex AI\n", + "* Cloud Storage\n", + "\n", + "Learn about [Vertex AI\n", + "pricing](https://cloud.google.com/vertex-ai/pricing), [Cloud Storage\n", + "pricing](https://cloud.google.com/storage/pricing), and use the [Pricing\n", + "Calculator](https://cloud.google.com/products/calculator/)\n", + "to generate a cost estimate based on your projected usage.\n", + "\n", + "To get an estimate of the number of tokens" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "%pip install --upgrade --user --quiet google-cloud-aiplatform datasets" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XRvKdaPDTznN", + "outputId": "b9219138-e8f3-4cfd-e324-9d61ef383732" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'status': 'ok', 'restart': True}" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "# Use the environment variable if the user doesn't provide Project ID.\n", + "import os\n", + "import vertexai\n", + "\n", + "PROJECT_ID = \"\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 189, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "# Vertex AI SDK\n", + "from google.cloud import aiplatform\n", + "from google.cloud.aiplatform.metadata import context\n", + "from google.cloud.aiplatform.metadata import utils as metadata_utils\n", + "from vertexai.generative_models import (\n", + " GenerationConfig,\n", + " GenerativeModel,\n", + " HarmBlockThreshold,\n", + " HarmCategory,\n", + ")\n", + "from vertexai.preview.tuning import sft\n", + "\n", + "# Vertex AI SDK\n", + "from sklearn.metrics import f1_score\n", + "from sklearn.feature_extraction.text import TfidfVectorizer\n", + "import pandas as pd\n", + "import array\n", + "import time\n", + "from datasets import load_dataset\n", + "import numpy as np\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6bBZa2I-c-x8" + }, + "source": [ + "### Data\n", + "\n", + "#### SQuAD dataset\n", + "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, 
from the corresponding reading passage, or the question might be unanswerable.\n", + "\n", + "You can find more information on the SQuAD [github page](https://rajpurkar.github.io/SQuAD-explorer/)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KhebDJjRKePL" + }, + "source": [ + "First update the `BUCKET_NAME` parameter below. You can either use an existing bucket or create a new one." + ] + }, + { + "cell_type": "code", + "execution_count": 95, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "lit30Cktbfvo", + "outputId": "273ee3ae-cb16-42fd-9d59-898826d2fb60" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "gs://tuning-demo-erwinh/gemini-tuning\n" + ] + } + ], + "source": [ + "# Provide a bucket name\n", + "BUCKET_NAME = \"tuning-demo-erwinh/gemini-tuning\" # @param {type:\"string\"}\n", + "BUCKET_URI = f\"gs://{BUCKET_NAME}\"\n", + "print(BUCKET_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ed-G-9cyKmPY" + }, + "source": [ + "Only run the code below if you want to create a new Google Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0UJ8S9YFA1pZ" + }, + "outputs": [], + "source": [ + "# ! gsutil mb -l {LOCATION} -p {PROJECT_ID} {BUCKET_URI}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "izjwF63tLLEq" + }, + "source": [ + "Next you will copy the data into your bucket." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wjvcxx_sA3xP" + }, + "outputs": [], + "source": [ + "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_test.csv .\n", + "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_train.csv .\n", + "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_validation.csv ." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3F10LuZeL3kt" + }, + "source": [ + "### Baseline\n", + "\n", + "Next you will prepare some test data that you will use to establish a baseline. This means evaluating your chosen model on a representative sample of your dataset before any fine-tuning. A baseline allows you to quantify the improvements achieved through fine-tuning." + ] + }, + { + "cell_type": "code", + "execution_count": 181, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 98 + }, + "id": "LkOmXpegA8CW", + "outputId": "297f4339-83fc-4a4a-9ed0-62a469ac1acd" + }, + "outputs": [ + { + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "summary": "{\n \"name\": \"test_df\",\n \"rows\": 40,\n \"fields\": [\n {\n \"column\": \"id\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 40,\n \"samples\": [\n \"5725bae289a1e219009abd92\",\n \"5726431aec44d21400f3dd13\",\n \"57269fab5951b619008f7808\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 26,\n \"samples\": [\n \"Teacher\",\n \"Ctenophora\",\n \"Normans\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"context\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 38,\n \"samples\": [\n \"On May 21, 2013, NFL owners at their spring meetings in Boston voted and awarded the game to Levi's Stadium. The $1.2 billion stadium opened in 2014. It is the first Super Bowl held in the San Francisco Bay Area since Super Bowl XIX in 1985, and the first in California since Super Bowl XXXVII took place in San Diego in 2003.\",\n \"The contracted batch of 15 Saturn Vs were enough for lunar landing missions through Apollo 20. 
NASA publicized a preliminary list of eight more planned landing sites, with plans to increase the mass of the CSM and LM for the last five missions, along with the payload capacity of the Saturn V. These final missions would combine the I and J types in the 1967 list, allowing the CMP to operate a package of lunar orbital sensors and cameras while his companions were on the surface, and allowing them to stay on the Moon for over three days. These missions would also carry the Lunar Roving Vehicle (LRV) increasing the exploration area and allowing televised liftoff of the LM. Also, the Block II spacesuit was revised for the extended missions to allow greater flexibility and visibility for driving the LRV.\",\n \"In July 1977, General Zia-ul-Haq overthrew Prime Minister Zulfiqar Ali Bhutto's regime in Pakistan. Ali Bhutto, a leftist in democratic competition with Islamists, had announced banning alcohol and nightclubs within six months, shortly before he was overthrown. Zia-ul-Haq was much more committed to Islamism, and \\\"Islamization\\\" or implementation of Islamic law, became a cornerstone of his eleven-year military dictatorship and Islamism became his \\\"official state ideology\\\". Zia ul Haq was an admirer of Mawdudi and Mawdudi's party Jamaat-e-Islami became the \\\"regime's ideological and political arm\\\". In Pakistan this Islamization from above was \\\"probably\\\" more complete \\\"than under any other regime except those in Iran and Sudan,\\\" but Zia-ul-Haq was also criticized by many Islamists for imposing \\\"symbols\\\" rather than substance, and using Islamization to legitimize his means of seizing power. Unlike neighboring Iran, Zia-ul-Haq's policies were intended to \\\"avoid revolutionary excess\\\", and not to strain relations with his American and Persian Gulf state allies. 
Zia-ul-Haq was killed in 1988 but Islamization remains an important element in Pakistani society.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"question\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 40,\n \"samples\": [\n \"How many species of Ctenophores have not been fully described or named?\",\n \"What was Tymnet\",\n \"What is Michael Carrick and Alan Shearer's profession?\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"answers\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 40,\n \"samples\": [\n \"possibly another 25\",\n \"an international data communications network headquartered in San Jose, CA\",\n \"international footballers\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}", + "type": "dataframe", + "variable_name": "test_df" + }, + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
idtitlecontextquestionanswers
056de3cd0cffd8e1900b4b6bfNormansNormans came into Scotland, building castles a...What culture's arrival in Scotland is know as ...Norman
\n", + "
\n", + "
\n", + "\n", + "
\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + "\n", + "\n", + "
\n", + "
\n" + ], + "text/plain": [ + " id title \\\n", + "0 56de3cd0cffd8e1900b4b6bf Normans \n", + "\n", + " context \\\n", + "0 Normans came into Scotland, building castles a... \n", + "\n", + " question answers \n", + "0 What culture's arrival in Scotland is know as ... Norman " + ] + }, + "execution_count": 181, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "test_df = pd.read_csv('squad_test.csv')\n", + "test_df.head(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mrLxcVVcMsNO" + }, + "source": [ + "You will need to do some dataset preperations. We will add a system instruction to the dataset:\n", + "\n", + "`SystemInstruct`: System instructions are a set of instructions that the model processes before it processes prompts. We recommend that you use system instructions to tell the model how you want it to behave and respond to prompts.\n", + "\n", + "We will also combine the `context` and `question`." + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": { + "id": "c0pgJycOekZ3" + }, + "outputs": [], + "source": [ + "systemInstruct = \"Answer the question based on the context\"" + ] + }, + { + "cell_type": "code", + "execution_count": 182, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "N_u3VzUMsyqj", + "outputId": "ff16f581-73de-4595-aeed-6a80b39e8d4e" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Answer the question based on the context\n", + "Context: In July 1973, as part of its outreach programme to young people, the V&A became the first museum in Britain to present a rock concert. The V&A presented a combined concert/lecture by British progressive folk-rock band Gryphon, who explored the lineage of mediaeval music and instrumentation and related how those contributed to contemporary music 500 years later. 
This innovative approach to bringing young people to museums was a hallmark of the directorship of Roy Strong and was subsequently emulated by some other British museums.\n", + "Question: Which musical group did the V&A present in July 1973 as part of its youth outreach programme?\n" + ] + } + ], + "source": [ + "# combine the systeminstruct + context + question into one column.\n", + "row_dataset = 6\n", + "\n", + "test_df[\"input_question\"] = systemInstruct + \"\\n\" + \"Context: \" + test_df[\"context\"] + \"\\n\" + \"Question: \" + test_df[\"question\"]\n", + "test_question = test_df[\"input_question\"].iloc[row_dataset]\n", + "print(test_question)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FSxYYqMGWrmj" + }, + "source": [ + "Next, set the model that you will use. In this example you will use `gemini-1.5-flash-002`. A multimodal model that is designed for high-volume, cost-effective applications, and which delivers speed and efficiency to build fast, lower-cost applications that don't compromise on quality.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 183, + "metadata": { + "id": "t-5X4goiqqBQ" + }, + "outputs": [], + "source": [ + "base_model = \"gemini-1.5-flash-002\"\n", + "generation_model = GenerativeModel(base_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wyscyIenW4WZ" + }, + "source": [ + "Next, let's take a question and get a prediction from Gemini that we can compare to the actual answer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 184, + "metadata": { + "id": "cXencUYc6YAE" + }, + "outputs": [], + "source": [ + "def get_predictions(question: str) -> str:\n", + " \"\"\"Generates predictions for a given test question.\n", + "\n", + " Args:\n", + " test_question: The question to generate predictions for.\n", + "\n", + " Returns:\n", + " The generated prediction text.\n", + " \"\"\"\n", + "\n", + " prompt = f\"{question}\"\n", + "\n", + " generation_config = GenerationConfig(\n", + " temperature=0.1)\n", + "\n", + " response = generation_model.generate_content(\n", + " contents=prompt, generation_config=generation_config\n", + " ).text\n", + "\n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": 186, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gKa0wLooa3Is", + "outputId": "ce10cb15-31c9-4fbe-af4d-6c8d65139648" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gemini response: The V&A presented the British progressive folk-rock band Gryphon.\n", + "\n", + "Actual answer: Gryphon\n" + ] + } + ], + "source": [ + "test_answer = test_df[\"answers\"].iloc[row_dataset]\n", + "\n", + "response = get_predictions(test_question)\n", + "\n", + "print(f\"Gemini response: {response}\")\n", + "print(f\"Actual answer: {test_answer}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OGRJTHKrdujw" + }, + "source": [ + "You can see that both answers are correct, but the response from Gemini is more lengthy. However, answers in the SQuAD dataset are typically concise and clear.\n", + "\n", + "Fine-tuning is a great way to control the type of output your use case requires. In this instance, you would want the model to provide short, clear answers.\n", + "\n", + "Next, let's check if each dataset has an equal number of examples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 188, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "dCe0CUsi5E-Y", + "outputId": "8e89fbf4-7483-448e-b50a-4bfd50adeb75" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of strings in y_pred: 40\n", + "Number of strings in y_true: 40\n" + ] + } + ], + "source": [ + "num_strings_pred = np.sum([isinstance(item, str) for item in y_pred])\n", + "print(f\"Number of strings in y_pred: {num_strings_pred}\")\n", + "\n", + "num_strings_true = np.sum([isinstance(item, str) for item in y_true])\n", + "print(f\"Number of strings in y_true: {num_strings_true}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hvi7m8pKE8WB" + }, + "source": [ + "Next, let's establish a baseline using evaluation metrics.\n", + "\n", + "Evaluating the performance of a Question Answering (QA) system requires specific metrics. Two commonly used metrics are Exact Match (EM) and F1 score.\n", + "\n", + "EM is a strict measure that only considers an answer correct if it perfectly matches the ground truth, even down to the punctuation. It's a binary metric - either 1 for a perfect match or 0 otherwise. This makes it sensitive to minor variations in phrasing.\n", + "\n", + "F1 score is more flexible. It considers the overlap between the predicted answer and the true answer in terms of individual words or tokens. It calculates the harmonic mean of precision (proportion of correctly predicted words out of all predicted words) and recall (proportion of correctly predicted words out of all true answer words). This allows for partial credit and is less sensitive to minor wording differences.\n", + "\n", + "In practice, EM is useful when exact wording is crucial, while F1 is more suitable when evaluating the overall understanding and semantic accuracy of the QA system. Often, both metrics are used together to provide a comprehensive evaluation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 190, + "metadata": { + "id": "XcgEpTU55FFc" + }, + "outputs": [], + "source": [ + "def calculate_em_and_f1_for_text_arrays(y_true, y_pred, average='weighted'):\n", + " \"\"\"\n", + " Calculates the Exact Match (EM) and F1 score for arrays of text\n", + " using word-level comparisons.\n", + "\n", + " Args:\n", + " y_true: An array of ground truth strings.\n", + " y_pred: An array of predicted strings.\n", + " average: The averaging method to use for F1 score.\n", + "\n", + " Returns:\n", + " A tuple containing the EM score and the F1 score.\n", + " \"\"\"\n", + "\n", + " em = np.mean([t == p for t, p in zip(y_true, y_pred)])\n", + "\n", + " # Use TF-IDF to convert strings to numerical vectors\n", + " vectorizer = TfidfVectorizer()\n", + " all_text = np.concatenate((y_true, y_pred))\n", + " vectorizer.fit(all_text)\n", + " y_true_vec = vectorizer.transform(y_true)\n", + " y_pred_vec = vectorizer.transform(y_pred)\n", + "\n", + " # Calculate F1 score based on common words (non-zero elements)\n", + " y_true_class = (y_true_vec > 0).toarray().astype(int)\n", + " y_pred_class = (y_pred_vec > 0).toarray().astype(int)\n", + "\n", + " f1 = f1_score(y_true_class, y_pred_class, average=average)\n", + "\n", + " return em, f1\n" + ] + }, + { + "cell_type": "code", + "execution_count": 191, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "rhDTq9p_GSBP", + "outputId": "4dde775e-2466-4ef7-e380-d23abddc6690" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EM score: 0.0\n", + "F1 score: 0.030862136294937427\n" + ] + } + ], + "source": [ + "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", + "print(f\"EM score: {em}\")\n", + "print(f\"F1 score: {f1}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "22DfexbNfUHm" + }, + "source": [ + "### Prepare the data for fine-tuning\n", + "\n", + "To optimize the tuning process for a 
foundation model, ensure your dataset includes examples that reflect the desired task. Structure your training data in a text-to-text format, where each record in the dataset pairs an input text (or prompt) with its corresponding expected output. This supervised tuning approach uses the dataset to effectively teach the model the specific behavior or task you need it to perform, by providing numerous illustrative examples.\n", + "\n", + "The size of your dataset will vary depending on the complexity of the task, but as a general rule, the more examples you include, the better the model's performance.\n", + "\n", + "Dataset Format\n", + "Your training data should be structured in a JSONL file and stored at a Google Cloud Storage (GCS) URI. Each line in the JSONL file must adhere to the following schema:\n", + "\n", + "A `contents` array containing objects that define:\n", + "- A `role` (\"user\" for user input or \"model\" for model output)\n", + "- `parts` containing the input data.\n", + "\n", + "```\n", + "{\n", + " \"contents\":[\n", + " {\n", + " \"role\":\"user\", # This indicate input content\n", + " \"parts\":[\n", + " {\n", + " \"text\":\"How are you?\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"role\":\"model\", # This indicate target content\n", + " \"parts\":[ # text only\n", + " {\n", + " \"text\":\"I am good, thank you!\"\n", + " }\n", + " ]\n", + " }\n", + " # ... repeat \"user\", \"model\" for multi turns.\n", + " ]\n", + "}\n", + "```\n", + "\n", + "Refer to the public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-prepare#about-datasets) for more details." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 192, + "metadata": { + "id": "4DqrQp4cLqRy" + }, + "outputs": [], + "source": [ + "# combine the systeminstruct + context + question into one column.\n", + "train_df = pd.read_csv('squad_train.csv')\n", + "validation_df = pd.read_csv('squad_validation.csv')\n", + "\n", + "train_df[\"input_question\"] = systemInstruct + \"Context: \" + train_df[\"context\"] + \"Question: \" + train_df[\"question\"]\n", + "validation_df[\"input_question\"] = systemInstruct + \"Context: \" + validation_df[\"context\"] + \"Question: \" + validation_df[\"question\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 197, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Pmzyz1migvHN", + "outputId": "38b0b753-b526-41a8-d124-d73baa2152bc" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "JSONL data written to squad_train.jsonl\n", + "JSONL data written to squad_validation.jsonl\n" + ] + } + ], + "source": [ + "def df_to_jsonl(df, output_file):\n", + " \"\"\"Converts a Pandas DataFrame to JSONL format and saves it to a file.\n", + "\n", + " Args:\n", + " df: The DataFrame to convert.\n", + " output_file: The name of the output file.\n", + " \"\"\"\n", + "\n", + " with open(output_file, 'w') as f:\n", + " for row in df.itertuples(index=False):\n", + " jsonl_obj = {\n", + " \"systemInstruction\": {\"parts\": [{\"text\": \"Answer the question based on the provided context.\"}]},\n", + " \"contents\": [\n", + " {\n", + " \"role\": \"user\",\n", + " \"parts\": [{\"text\": f\"Context: {row.context}\\n\\nQuestion: {row.question}\"}]\n", + " },\n", + " {\"role\": \"model\", \"parts\": [{\"text\": row.answers}]},\n", + " ]\n", + " }\n", + " f.write(json.dumps(jsonl_obj) + '\\n')\n", + "\n", + "# Process the DataFrames\n", + "df_to_jsonl(train_df, 'squad_train.jsonl')\n", + "df_to_jsonl(validation_df, 'squad_validation.jsonl')\n", + "\n", + "print(f\"JSONL data written 
to squad_train.jsonl\")\n", + "print(f\"JSONL data written to squad_validation.jsonl\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5OQv-ZMpJDhi" + }, + "source": [ + "Next you will copy the files into your Google Cloud bucket" + ] + }, + { + "cell_type": "code", + "execution_count": 195, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "O5k1jYJ10IeW", + "outputId": "f6af525a-0c69-414b-a9f7-7340879f4868" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Copying file://./squad_train.jsonl [Content-Type=application/octet-stream]...\n", + "/ [1 files][527.0 KiB/527.0 KiB] \n", + "Operation completed over 1 objects/527.0 KiB. \n", + "Copying file://./squad_validation.jsonl [Content-Type=application/octet-stream]...\n", + "/ [1 files][110.9 KiB/110.9 KiB] \n", + "Operation completed over 1 objects/110.9 KiB. \n" + ] + } + ], + "source": [ + "!gsutil cp ./squad_train.jsonl {BUCKET_URI}\n", + "!gsutil cp ./squad_validation.jsonl {BUCKET_URI}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UAHMYgFJJHjm" + }, + "source": [ + "### Start fine-tuning job\n", + "Next you can start the fine-tuning job.\n", + "\n", + "- `source_model`: Specifies the base Gemini model version you want to fine-tune.\n", + " - `train_dataset`: Path to your training data in JSONL format.\n", + "\n", + " *Optional parameters*\n", + " - `validation_dataset`: If provided, this data is used to evaluate the model during tuning.\n", + " - `tuned_model_display_name`: Display name for the tuned model.\n", + " - `epochs`: The number of training epochs to run.\n", + " - `learning_rate_multiplier`: A value to scale the learning rate during training.\n", + " - `adapter_size` : Gemini 1.5 Flash supports Adapter length [1, 4], default value is 4.\n", + "\n", + " **Important**: The default hyperparameter settings are optimized for optimal performance based on rigorous testing and are recommended for initial 
use. Users may customize these parameters to address specific performance requirements.**" + ] + }, + { + "cell_type": "code", + "execution_count": 139, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 181 + }, + "id": "qj-LjQ5Vbf1E", + "outputId": "5af1f956-d5e3-4111-c100-85e60cc90890" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.tuning._tuning:Creating SupervisedTuningJob\n", + "INFO:vertexai.tuning._tuning:SupervisedTuningJob created. Resource name: projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840\n", + "INFO:vertexai.tuning._tuning:To use this SupervisedTuningJob in another session:\n", + "INFO:vertexai.tuning._tuning:tuning_job = sft.SupervisedTuningJob('projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840')\n", + "INFO:vertexai.tuning._tuning:View Tuning Job:\n", + "https://console.cloud.google.com/vertex-ai/generative/language/locations/us-central1/tuning/tuningJob/8356726629560483840?project=713601331534\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " tune\n", + " View Tuning Job\n", + " \n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tuned_model_display_name = \"erwinh-fine-tuning-flash\" # @param {type:\"string\"}\n", + "\n", + "sft_tuning_job = sft.train(\n", + " source_model=base_model,\n", + " train_dataset=f\"\"\"{BUCKET_URI}/squad_train.jsonl\"\"\",\n", + " # # Optional:\n", + " validation_dataset=f\"\"\"{BUCKET_URI}/squad_validation.jsonl\"\"\",\n", + " tuned_model_display_name=tuned_model_display_name,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 198, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "3tXawW1p8E5-", + "outputId": "83b11366-6ff2-4900-d6af-3a0f7a9aca19" + }, + "outputs": [ + { + "data": { + 
"text/plain": [ + "{'name': 'projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840',\n", + " 'tunedModelDisplayName': 'erwinh-fine-tuning-flash',\n", + " 'baseModel': 'gemini-1.5-flash-002',\n", + " 'supervisedTuningSpec': {'trainingDatasetUri': 'gs://tuning-demo-erwinh/gemini-tuning/squad_train.jsonl',\n", + " 'validationDatasetUri': 'gs://tuning-demo-erwinh/gemini-tuning/squad_validation.jsonl',\n", + " 'hyperParameters': {'epochCount': '10',\n", + " 'learningRateMultiplier': 1.0,\n", + " 'adapterSize': 'ADAPTER_SIZE_EIGHT'}},\n", + " 'state': 'JOB_STATE_SUCCEEDED',\n", + " 'createTime': '2024-10-09T06:20:11.698883Z',\n", + " 'startTime': '2024-10-09T06:20:11.740358Z',\n", + " 'endTime': '2024-10-09T06:32:38.867719Z',\n", + " 'updateTime': '2024-10-09T06:32:38.867719Z',\n", + " 'experiment': 'projects/713601331534/locations/us-central1/metadataStores/default/contexts/tuning-experiment-20241008232013040864',\n", + " 'tunedModel': {'model': 'projects/713601331534/locations/us-central1/models/1582035604160380928@1',\n", + " 'endpoint': 'projects/713601331534/locations/us-central1/endpoints/5693131570647400448'},\n", + " 'tuningDataStats': {'supervisedTuningDataStats': {'tuningDatasetExampleCount': '500',\n", + " 'userInputTokenDistribution': {'sum': '94474',\n", + " 'min': 54.0,\n", + " 'max': 602.0,\n", + " 'mean': 188.948,\n", + " 'median': 172.0,\n", + " 'p5': 107.0,\n", + " 'p95': 327.0,\n", + " 'buckets': [{'count': 126.0, 'left': 54.0, 'right': 145.0},\n", + " {'count': 273.0, 'left': 146.0, 'right': 236.0},\n", + " {'count': 77.0, 'left': 237.0, 'right': 328.0},\n", + " {'count': 21.0, 'left': 329.0, 'right': 419.0},\n", + " {'count': 2.0, 'left': 420.0, 'right': 511.0},\n", + " {'count': 1.0, 'left': 512.0, 'right': 602.0}]},\n", + " 'userOutputTokenDistribution': {'sum': '2420',\n", + " 'min': 1.0,\n", + " 'max': 43.0,\n", + " 'mean': 4.84,\n", + " 'median': 4.0,\n", + " 'p5': 1.0,\n", + " 'p95': 15.0,\n", + " 'buckets': [{'count': 
440.0, 'left': 1.0, 'right': 8.0},\n", + " {'count': 38.0, 'left': 9.0, 'right': 15.0},\n", + " {'count': 16.0, 'left': 16.0, 'right': 22.0},\n", + " {'count': 4.0, 'left': 23.0, 'right': 29.0},\n", + " {'count': 1.0, 'left': 30.0, 'right': 36.0},\n", + " {'count': 1.0, 'left': 37.0, 'right': 43.0}]},\n", + " 'userMessagePerExampleDistribution': {'sum': '1000',\n", + " 'min': 2.0,\n", + " 'max': 2.0,\n", + " 'mean': 2.0,\n", + " 'median': 2.0,\n", + " 'p5': 2.0,\n", + " 'p95': 2.0,\n", + " 'buckets': [{'count': 500.0, 'left': 2.0, 'right': 2.0}]},\n", + " 'userDatasetExamples': [{'role': 'user',\n", + " 'parts': [{'text': 'Context: On the next day, December 18, protests turned into civil unrest as clashes between troops, volunteers, militia units, and Kazakh students turned into a wide-scale confrontation. The clashes could only be controlled on the third day. The Almaty events were followed by smaller protests and demonstrations in Shymkent, Pavlodar, Karaganda, and Taldykorgan. Reports from Kazakh SSR authoriti...'}]},\n", + " {'role': 'model', 'parts': [{'text': '3,000'}]},\n", + " {'role': 'user',\n", + " 'parts': [{'text': \"Context: Roman Catholicism was the sole established religion in the Holy Roman Empire until the Reformation changed this drastically. In 1517, Martin Luther challenged the Catholic Church as he saw it as a corruption of Christian faith. Through this, he altered the course of European and world history and established Protestantism. The Thirty Years' War (1618–1648) was one of the most destructi...\"}]},\n", + " {'role': 'model', 'parts': [{'text': 'Roman Catholicism'}]},\n", + " {'role': 'user',\n", + " 'parts': [{'text': \"Context: Israel retaliated against Egyptian shelling with commando raids, artillery shelling and air strikes. This resulted in an exodus of civilians from Egyptian cities along the Suez Canal's western bank. 
Nasser ceased all military activities and began a program to build a network of internal defenses, while receiving the financial backing of various Arab states. The war resumed in March 196...\"}]},\n", + " {'role': 'model', 'parts': [{'text': 'March 1969'}]}],\n", + " 'totalBillableTokenCount': '96894'}}}" + ] + }, + "execution_count": 198, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Get the tuning job info.\n", + "sft_tuning_job.to_dict()" + ] + }, + { + "cell_type": "code", + "execution_count": 199, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "19aQnN-k84d9", + "outputId": "5c7f8c4f-566f-4c12-ffc7-60b4340d03d3" + }, + "outputs": [ + { + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + }, + "text/plain": [ + "'projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840'" + ] + }, + "execution_count": 199, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Get the resource name of the tuning job\n", + "sft_tuning_job_name = sft_tuning_job.resource_name\n", + "sft_tuning_job_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UKo8cwF2KVM5" + }, + "source": [ + "**Important:** Tuning time depends on several factors, such as training data size, number of epochs, learning rate multiplier, etc." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8NiZnPkIKcwm" + }, + "source": [ + "
\n", + "⚠️ It will take ~30 mins for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 200, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Njag_3cB86rH", + "outputId": "b1408519-3735-4aca-86aa-89b0da5699b1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU times: user 67.5 ms, sys: 4.98 ms, total: 72.5 ms\n", + "Wall time: 1.28 s\n" + ] + } + ], + "source": [ + "%%time\n", + "# Wait for job completion\n", + "while not sft_tuning_job.refresh().has_ended:\n", + " time.sleep(60)" + ] + }, + { + "cell_type": "code", + "execution_count": 201, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "dkx92RBdbf27", + "outputId": "0641746b-de08-4f9b-80cc-5afef558e7ea" + }, + "outputs": [ + { + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + }, + "text/plain": [ + "'projects/713601331534/locations/us-central1/models/1582035604160380928@1'" + ] + }, + "execution_count": 201, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# tuned model name\n", + "tuned_model_name = sft_tuning_job.tuned_model_name\n", + "tuned_model_name" + ] + }, + { + "cell_type": "code", + "execution_count": 202, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "e09aB_9Ebf5c", + "outputId": "c8a028c1-da5f-4e7d-d9db-ecde9c5293e1" + }, + "outputs": [ + { + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + }, + "text/plain": [ + "'projects/713601331534/locations/us-central1/endpoints/5693131570647400448'" + ] + }, + "execution_count": 202, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# tuned model endpoint name\n", + "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", + "tuned_model_endpoint_name" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gV1ukBznKmlN" + }, + "source": [ + "#### Model 
tuning metrics\n", + "\n", + "- `/train_total_loss`: Loss for the tuning dataset at a training step.\n", + "- `/train_fraction_of_correct_next_step_preds`: The token accuracy at a training step. A single prediction consists of a sequence of tokens. This metric measures the accuracy of the predicted tokens when compared to the ground truth in the tuning dataset.\n", + "- `/train_num_predictions`: Number of predicted tokens at a training step\n", + "\n", + "#### Model evaluation metrics:\n", + "\n", + "- `/eval_total_loss`: Loss for the evaluation dataset at an evaluation step.\n", + "- `/eval_fraction_of_correct_next_step_preds`: The token accuracy at an evaluation step. A single prediction consists of a sequence of tokens. This metric measures the accuracy of the predicted tokens when compared to the ground truth in the evaluation dataset.\n", + "- `/eval_num_predictions`: Number of predicted tokens at an evaluation step.\n", + "\n", + "The metrics visualizations are available after the model tuning job completes. If you don't specify a validation dataset when you create the tuning job, only the visualizations for the tuning metrics are available." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 155, + "metadata": { + "id": "DH0guHM---Jo" + }, + "outputs": [], + "source": [ + "# Locate Vertex AI Experiment and Vertex AI Experiment Run\n", + "experiment = aiplatform.Experiment(experiment_name=experiment_name)\n", + "filter_str = metadata_utils._make_filter_string(\n", + " schema_title=\"system.ExperimentRun\",\n", + " parent_contexts=[experiment.resource_name],\n", + ")\n", + "experiment_run = context.Context.list(filter_str)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 156, + "metadata": { + "id": "hggHQFIl_FXC" + }, + "outputs": [], + "source": [ + "# Read data from Tensorboard\n", + "tensorboard_run_name = f\"{experiment.get_backing_tensorboard_resource().resource_name}/experiments/{experiment.name}/runs/{experiment_run.name.replace(experiment.name, '')[1:]}\"\n", + "tensorboard_run = aiplatform.TensorboardRun(tensorboard_run_name)\n", + "metrics = tensorboard_run.read_time_series_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 157, + "metadata": { + "id": "BdHKZdqG_bHf" + }, + "outputs": [], + "source": [ + "def get_metrics(metric: str = \"/train_total_loss\"):\n", + " \"\"\"\n", + " Get metrics from Tensorboard.\n", + "\n", + " Args:\n", + " metric: metric name, eg. 
/train_total_loss or /eval_total_loss.\n", + " Returns:\n", + " steps: list of steps.\n", + " steps_loss: list of loss values.\n", + " \"\"\"\n", + " loss_values = metrics[metric].values\n", + " steps_loss = []\n", + " steps = []\n", + " for loss in loss_values:\n", + " steps_loss.append(loss.scalar.value)\n", + " steps.append(loss.step)\n", + " return steps, steps_loss" + ] + }, + { + "cell_type": "code", + "execution_count": 158, + "metadata": { + "id": "_pDrlpA7_e9o" + }, + "outputs": [], + "source": [ + "# Get Train and Eval Loss\n", + "train_loss = get_metrics(metric=\"/train_total_loss\")\n", + "eval_loss = get_metrics(metric=\"/eval_total_loss\")" + ] + }, + { + "cell_type": "code", + "execution_count": 161, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 542 + }, + "id": "DL07j7u__iZx", + "outputId": "3cff463b-59f7-4db3-a884-e3a099583ab5" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "
\n", + "
\n", + "\n", + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Plot the train and eval loss metrics using Plotly python library\n", + "fig = make_subplots(\n", + " rows=1, cols=2, shared_xaxes=True, subplot_titles=(\"Train Loss\", \"Eval Loss\")\n", + ")\n", + "\n", + "# Add traces\n", + "fig.add_trace(\n", + " go.Scatter(x=train_loss[0], y=train_loss[1], name=\"Train Loss\", mode=\"lines\"),\n", + " row=1,\n", + " col=1,\n", + ")\n", + "fig.add_trace(\n", + " go.Scatter(x=eval_loss[0], y=eval_loss[1], name=\"Eval Loss\", mode=\"lines\"),\n", + " row=1,\n", + " col=2,\n", + ")\n", + "\n", + "# Add figure title\n", + "fig.update_layout(title=\"Train and Eval Loss\", xaxis_title=\"Steps\", yaxis_title=\"Loss\")\n", + "\n", + "# Set x-axis title\n", + "fig.update_xaxes(title_text=\"Steps\")\n", + "\n", + "# Set y-axes titles\n", + "fig.update_yaxes(title_text=\"Loss\")\n", + "\n", + "# Show plot\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 162, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "dYygz5ph_icf", + "outputId": "f15d6aa8-bbf3-46ca-e919-e91760c9ba90" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "***Testing***\n", + "candidates {\n", + " content {\n", + " role: \"model\"\n", + " parts {\n", + " text: \"European Council\\n\\n\"\n", + " }\n", + " }\n", + " finish_reason: STOP\n", + " avg_logprobs: -0.11596920092900594\n", + "}\n", + "usage_metadata {\n", + " prompt_token_count: 290\n", + " candidates_token_count: 3\n", + " total_token_count: 293\n", + "}\n", + "\n" + ] + } + ], + "source": [ + "if True:\n", + " tuned_genai_model = GenerativeModel(tuned_model_endpoint_name)\n", + " # Test with the loaded model.\n", + " print(\"***Testing***\")\n", + " print(\n", + " tuned_genai_model.generate_content(\n", + " contents=prompt, generation_config=generation_config\n", + " )\n", + " )\n", + "else:\n", + " print(\"State:\", 
sft_tuning_job.state)\n", + " print(\"Error:\", sft_tuning_job.error)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AbTaqCMxNf18" + }, + "source": [ + "### Model usage and evaluation.\n", + "\n", + "Next you will evaluate the model to see how well it performs. You can also compare it to the benchmark." + ] + }, + { + "cell_type": "code", + "execution_count": 164, + "metadata": { + "id": "W4YMNGuoDajB" + }, + "outputs": [], + "source": [ + "y_true = test_df[\"answers\"].values\n", + "\n", + "def get_predictions(test_question):\n", + "\n", + " prompt = f\"\"\"{test_question}\"\"\"\n", + "\n", + " generation_config = GenerationConfig(\n", + " temperature=0.1,\n", + " )\n", + "\n", + " response = tuned_genai_model.generate_content(contents=prompt, generation_config=generation_config).text\n", + "\n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": 165, + "metadata": { + "id": "69FMuAeoDrm5" + }, + "outputs": [], + "source": [ + "y_pred = []\n", + "y_pred_question = test_df[\"question\"].values\n", + "\n", + "for i in y_pred_question:\n", + "\n", + " prediction = get_predictions(i)\n", + " y_pred.append(prediction)" + ] + }, + { + "cell_type": "code", + "execution_count": 166, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "yj76Tu6ODalZ", + "outputId": "f5d76e1d-1fbf-4251-9c19-4f430a97ad0b" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EM score: 0.0\n", + "F1 score: 0.2399679487179487\n" + ] + } + ], + "source": [ + "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", + "print(f\"EM score: {em}\")\n", + "print(f\"F1 score: {f1}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Vkb2qXljFYqX" + }, + "source": [] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 
331cf71675d76a8e02201804c670463f332adc99 Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Thu, 10 Oct 2024 10:42:01 +0800 Subject: [PATCH 54/76] minor fixes (#1230) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. - [X] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. - [X] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [X] Appropriate docs were updated (if necessary) Fixes # 🦕 --- ...sed_finetuning_using_gemini_qa_ipynb.ipynb | 822 +++--------------- 1 file changed, 111 insertions(+), 711 deletions(-) diff --git a/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb index 2e8ccc4bfd..85f96df281 100644 --- a/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb +++ b/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb @@ -94,7 +94,7 @@ "\n", "- Authenticated your Colab environment: Run the authentication code block at the beginning.\n", "\n", - "- Prepared training data (Test with your own data or use the one in the notebook): Data should be formatted in JSONL with prompts and corresponding completions." + "- Prepare your training data. 
" ] }, { @@ -139,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": { "id": "tFy3H3aPgx12" }, @@ -163,26 +163,11 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "XRvKdaPDTznN", - "outputId": "b9219138-e8f3-4cfd-e324-9d61ef383732" - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'status': 'ok', 'restart': True}" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], "source": [ "import IPython\n", "\n", @@ -214,7 +199,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "id": "NyKGtVQjgx13" }, @@ -243,7 +228,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "id": "Nqwi-5ufWp_B" }, @@ -273,7 +258,7 @@ }, { "cell_type": "code", - "execution_count": 189, + "execution_count": null, "metadata": { "id": "6fc324893334" }, @@ -328,23 +313,11 @@ }, { "cell_type": "code", - "execution_count": 95, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "lit30Cktbfvo", - "outputId": "273ee3ae-cb16-42fd-9d59-898826d2fb60" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "gs://tuning-demo-erwinh/gemini-tuning\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "lit30Cktbfvo" + }, + "outputs": [], "source": [ "# Provide a bucket name\n", "BUCKET_NAME = \"tuning-demo-erwinh/gemini-tuning\" # @param {type:\"string\"}\n", @@ -407,162 +380,11 @@ }, { "cell_type": "code", - "execution_count": 181, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 98 - }, - "id": "LkOmXpegA8CW", - "outputId": "297f4339-83fc-4a4a-9ed0-62a469ac1acd" - }, - "outputs": [ - { - "data": { - "application/vnd.google.colaboratory.intrinsic+json": 
{ - "summary": "{\n \"name\": \"test_df\",\n \"rows\": 40,\n \"fields\": [\n {\n \"column\": \"id\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 40,\n \"samples\": [\n \"5725bae289a1e219009abd92\",\n \"5726431aec44d21400f3dd13\",\n \"57269fab5951b619008f7808\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 26,\n \"samples\": [\n \"Teacher\",\n \"Ctenophora\",\n \"Normans\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"context\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 38,\n \"samples\": [\n \"On May 21, 2013, NFL owners at their spring meetings in Boston voted and awarded the game to Levi's Stadium. The $1.2 billion stadium opened in 2014. It is the first Super Bowl held in the San Francisco Bay Area since Super Bowl XIX in 1985, and the first in California since Super Bowl XXXVII took place in San Diego in 2003.\",\n \"The contracted batch of 15 Saturn Vs were enough for lunar landing missions through Apollo 20. NASA publicized a preliminary list of eight more planned landing sites, with plans to increase the mass of the CSM and LM for the last five missions, along with the payload capacity of the Saturn V. These final missions would combine the I and J types in the 1967 list, allowing the CMP to operate a package of lunar orbital sensors and cameras while his companions were on the surface, and allowing them to stay on the Moon for over three days. These missions would also carry the Lunar Roving Vehicle (LRV) increasing the exploration area and allowing televised liftoff of the LM. Also, the Block II spacesuit was revised for the extended missions to allow greater flexibility and visibility for driving the LRV.\",\n \"In July 1977, General Zia-ul-Haq overthrew Prime Minister Zulfiqar Ali Bhutto's regime in Pakistan. 
Ali Bhutto, a leftist in democratic competition with Islamists, had announced banning alcohol and nightclubs within six months, shortly before he was overthrown. Zia-ul-Haq was much more committed to Islamism, and \\\"Islamization\\\" or implementation of Islamic law, became a cornerstone of his eleven-year military dictatorship and Islamism became his \\\"official state ideology\\\". Zia ul Haq was an admirer of Mawdudi and Mawdudi's party Jamaat-e-Islami became the \\\"regime's ideological and political arm\\\". In Pakistan this Islamization from above was \\\"probably\\\" more complete \\\"than under any other regime except those in Iran and Sudan,\\\" but Zia-ul-Haq was also criticized by many Islamists for imposing \\\"symbols\\\" rather than substance, and using Islamization to legitimize his means of seizing power. Unlike neighboring Iran, Zia-ul-Haq's policies were intended to \\\"avoid revolutionary excess\\\", and not to strain relations with his American and Persian Gulf state allies. Zia-ul-Haq was killed in 1988 but Islamization remains an important element in Pakistani society.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"question\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 40,\n \"samples\": [\n \"How many species of Ctenophores have not been fully described or named?\",\n \"What was Tymnet\",\n \"What is Michael Carrick and Alan Shearer's profession?\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"answers\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 40,\n \"samples\": [\n \"possibly another 25\",\n \"an international data communications network headquartered in San Jose, CA\",\n \"international footballers\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}", - "type": "dataframe", - "variable_name": "test_df" - }, - "text/html": [ - "\n", - "
\n", - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
idtitlecontextquestionanswers
056de3cd0cffd8e1900b4b6bfNormansNormans came into Scotland, building castles a...What culture's arrival in Scotland is know as ...Norman
\n", - "
\n", - "
\n", - "\n", - "
\n", - " \n", - "\n", - " \n", - "\n", - " \n", - "
\n", - "\n", - "\n", - "
\n", - "
\n" - ], - "text/plain": [ - " id title \\\n", - "0 56de3cd0cffd8e1900b4b6bf Normans \n", - "\n", - " context \\\n", - "0 Normans came into Scotland, building castles a... \n", - "\n", - " question answers \n", - "0 What culture's arrival in Scotland is know as ... Norman " - ] - }, - "execution_count": 181, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "id": "LkOmXpegA8CW" + }, + "outputs": [], "source": [ "test_df = pd.read_csv('squad_test.csv')\n", "test_df.head(1)" @@ -583,7 +405,7 @@ }, { "cell_type": "code", - "execution_count": 80, + "execution_count": null, "metadata": { "id": "c0pgJycOekZ3" }, @@ -594,25 +416,11 @@ }, { "cell_type": "code", - "execution_count": 182, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "N_u3VzUMsyqj", - "outputId": "ff16f581-73de-4595-aeed-6a80b39e8d4e" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Answer the question based on the context\n", - "Context: In July 1973, as part of its outreach programme to young people, the V&A became the first museum in Britain to present a rock concert. The V&A presented a combined concert/lecture by British progressive folk-rock band Gryphon, who explored the lineage of mediaeval music and instrumentation and related how those contributed to contemporary music 500 years later. 
This innovative approach to bringing young people to museums was a hallmark of the directorship of Roy Strong and was subsequently emulated by some other British museums.\n", - "Question: Which musical group did the V&A present in July 1973 as part of its youth outreach programme?\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "N_u3VzUMsyqj" + }, + "outputs": [], "source": [ "# combine the systeminstruct + context + question into one column.\n", "row_dataset = 6\n", @@ -634,7 +442,7 @@ }, { "cell_type": "code", - "execution_count": 183, + "execution_count": null, "metadata": { "id": "t-5X4goiqqBQ" }, @@ -655,7 +463,7 @@ }, { "cell_type": "code", - "execution_count": 184, + "execution_count": null, "metadata": { "id": "cXencUYc6YAE" }, @@ -685,25 +493,11 @@ }, { "cell_type": "code", - "execution_count": 186, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "gKa0wLooa3Is", - "outputId": "ce10cb15-31c9-4fbe-af4d-6c8d65139648" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Gemini response: The V&A presented the British progressive folk-rock band Gryphon.\n", - "\n", - "Actual answer: Gryphon\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "gKa0wLooa3Is" + }, + "outputs": [], "source": [ "test_answer = test_df[\"answers\"].iloc[row_dataset]\n", "\n", @@ -728,24 +522,11 @@ }, { "cell_type": "code", - "execution_count": 188, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "dCe0CUsi5E-Y", - "outputId": "8e89fbf4-7483-448e-b50a-4bfd50adeb75" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Number of strings in y_pred: 40\n", - "Number of strings in y_true: 40\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "dCe0CUsi5E-Y" + }, + "outputs": [], "source": [ "num_strings_pred = np.sum([isinstance(item, str) for item in y_pred])\n", "print(f\"Number of strings in y_pred: 
{num_strings_pred}\")\n", @@ -773,7 +554,7 @@ }, { "cell_type": "code", - "execution_count": 190, + "execution_count": null, "metadata": { "id": "XcgEpTU55FFc" }, @@ -813,24 +594,11 @@ }, { "cell_type": "code", - "execution_count": 191, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "rhDTq9p_GSBP", - "outputId": "4dde775e-2466-4ef7-e380-d23abddc6690" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "EM score: 0.0\n", - "F1 score: 0.030862136294937427\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "rhDTq9p_GSBP" + }, + "outputs": [], "source": [ "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", "print(f\"EM score: {em}\")\n", @@ -885,7 +653,7 @@ }, { "cell_type": "code", - "execution_count": 192, + "execution_count": null, "metadata": { "id": "4DqrQp4cLqRy" }, @@ -901,24 +669,11 @@ }, { "cell_type": "code", - "execution_count": 197, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Pmzyz1migvHN", - "outputId": "38b0b753-b526-41a8-d124-d73baa2152bc" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "JSONL data written to squad_train.jsonl\n", - "JSONL data written to squad_validation.jsonl\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "Pmzyz1migvHN" + }, + "outputs": [], "source": [ "def df_to_jsonl(df, output_file):\n", " \"\"\"Converts a Pandas DataFrame to JSONL format and saves it to a file.\n", @@ -961,28 +716,11 @@ }, { "cell_type": "code", - "execution_count": 195, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "O5k1jYJ10IeW", - "outputId": "f6af525a-0c69-414b-a9f7-7340879f4868" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Copying file://./squad_train.jsonl [Content-Type=application/octet-stream]...\n", - "/ [1 files][527.0 KiB/527.0 KiB] \n", - "Operation completed over 1 objects/527.0 KiB. 
\n", - "Copying file://./squad_validation.jsonl [Content-Type=application/octet-stream]...\n", - "/ [1 files][110.9 KiB/110.9 KiB] \n", - "Operation completed over 1 objects/110.9 KiB. \n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "O5k1jYJ10IeW" + }, + "outputs": [], "source": [ "!gsutil cp ./squad_train.jsonl {BUCKET_URI}\n", "!gsutil cp ./squad_validation.jsonl {BUCKET_URI}" @@ -1012,111 +750,11 @@ }, { "cell_type": "code", - "execution_count": 139, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 181 - }, - "id": "qj-LjQ5Vbf1E", - "outputId": "5af1f956-d5e3-4111-c100-85e60cc90890" - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:vertexai.tuning._tuning:Creating SupervisedTuningJob\n", - "INFO:vertexai.tuning._tuning:SupervisedTuningJob created. Resource name: projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840\n", - "INFO:vertexai.tuning._tuning:To use this SupervisedTuningJob in another session:\n", - "INFO:vertexai.tuning._tuning:tuning_job = sft.SupervisedTuningJob('projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840')\n", - "INFO:vertexai.tuning._tuning:View Tuning Job:\n", - "https://console.cloud.google.com/vertex-ai/generative/language/locations/us-central1/tuning/tuningJob/8356726629560483840?project=713601331534\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " tune\n", - " View Tuning Job\n", - " \n", - " \n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": { + "id": "qj-LjQ5Vbf1E" + }, + "outputs": [], "source": [ "tuned_model_display_name = \"erwinh-fine-tuning-flash\" # @param {type:\"string\"}\n", "\n", @@ -1131,86 +769,11 @@ }, { "cell_type": "code", - "execution_count": 198, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - 
"id": "3tXawW1p8E5-", - "outputId": "83b11366-6ff2-4900-d6af-3a0f7a9aca19" - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'name': 'projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840',\n", - " 'tunedModelDisplayName': 'erwinh-fine-tuning-flash',\n", - " 'baseModel': 'gemini-1.5-flash-002',\n", - " 'supervisedTuningSpec': {'trainingDatasetUri': 'gs://tuning-demo-erwinh/gemini-tuning/squad_train.jsonl',\n", - " 'validationDatasetUri': 'gs://tuning-demo-erwinh/gemini-tuning/squad_validation.jsonl',\n", - " 'hyperParameters': {'epochCount': '10',\n", - " 'learningRateMultiplier': 1.0,\n", - " 'adapterSize': 'ADAPTER_SIZE_EIGHT'}},\n", - " 'state': 'JOB_STATE_SUCCEEDED',\n", - " 'createTime': '2024-10-09T06:20:11.698883Z',\n", - " 'startTime': '2024-10-09T06:20:11.740358Z',\n", - " 'endTime': '2024-10-09T06:32:38.867719Z',\n", - " 'updateTime': '2024-10-09T06:32:38.867719Z',\n", - " 'experiment': 'projects/713601331534/locations/us-central1/metadataStores/default/contexts/tuning-experiment-20241008232013040864',\n", - " 'tunedModel': {'model': 'projects/713601331534/locations/us-central1/models/1582035604160380928@1',\n", - " 'endpoint': 'projects/713601331534/locations/us-central1/endpoints/5693131570647400448'},\n", - " 'tuningDataStats': {'supervisedTuningDataStats': {'tuningDatasetExampleCount': '500',\n", - " 'userInputTokenDistribution': {'sum': '94474',\n", - " 'min': 54.0,\n", - " 'max': 602.0,\n", - " 'mean': 188.948,\n", - " 'median': 172.0,\n", - " 'p5': 107.0,\n", - " 'p95': 327.0,\n", - " 'buckets': [{'count': 126.0, 'left': 54.0, 'right': 145.0},\n", - " {'count': 273.0, 'left': 146.0, 'right': 236.0},\n", - " {'count': 77.0, 'left': 237.0, 'right': 328.0},\n", - " {'count': 21.0, 'left': 329.0, 'right': 419.0},\n", - " {'count': 2.0, 'left': 420.0, 'right': 511.0},\n", - " {'count': 1.0, 'left': 512.0, 'right': 602.0}]},\n", - " 'userOutputTokenDistribution': {'sum': '2420',\n", - " 'min': 1.0,\n", - " 'max': 43.0,\n", - 
" 'mean': 4.84,\n", - " 'median': 4.0,\n", - " 'p5': 1.0,\n", - " 'p95': 15.0,\n", - " 'buckets': [{'count': 440.0, 'left': 1.0, 'right': 8.0},\n", - " {'count': 38.0, 'left': 9.0, 'right': 15.0},\n", - " {'count': 16.0, 'left': 16.0, 'right': 22.0},\n", - " {'count': 4.0, 'left': 23.0, 'right': 29.0},\n", - " {'count': 1.0, 'left': 30.0, 'right': 36.0},\n", - " {'count': 1.0, 'left': 37.0, 'right': 43.0}]},\n", - " 'userMessagePerExampleDistribution': {'sum': '1000',\n", - " 'min': 2.0,\n", - " 'max': 2.0,\n", - " 'mean': 2.0,\n", - " 'median': 2.0,\n", - " 'p5': 2.0,\n", - " 'p95': 2.0,\n", - " 'buckets': [{'count': 500.0, 'left': 2.0, 'right': 2.0}]},\n", - " 'userDatasetExamples': [{'role': 'user',\n", - " 'parts': [{'text': 'Context: On the next day, December 18, protests turned into civil unrest as clashes between troops, volunteers, militia units, and Kazakh students turned into a wide-scale confrontation. The clashes could only be controlled on the third day. The Almaty events were followed by smaller protests and demonstrations in Shymkent, Pavlodar, Karaganda, and Taldykorgan. Reports from Kazakh SSR authoriti...'}]},\n", - " {'role': 'model', 'parts': [{'text': '3,000'}]},\n", - " {'role': 'user',\n", - " 'parts': [{'text': \"Context: Roman Catholicism was the sole established religion in the Holy Roman Empire until the Reformation changed this drastically. In 1517, Martin Luther challenged the Catholic Church as he saw it as a corruption of Christian faith. Through this, he altered the course of European and world history and established Protestantism. The Thirty Years' War (1618–1648) was one of the most destructi...\"}]},\n", - " {'role': 'model', 'parts': [{'text': 'Roman Catholicism'}]},\n", - " {'role': 'user',\n", - " 'parts': [{'text': \"Context: Israel retaliated against Egyptian shelling with commando raids, artillery shelling and air strikes. This resulted in an exodus of civilians from Egyptian cities along the Suez Canal's western bank. 
Nasser ceased all military activities and began a program to build a network of internal defenses, while receiving the financial backing of various Arab states. The war resumed in March 196...\"}]},\n", - " {'role': 'model', 'parts': [{'text': 'March 1969'}]}],\n", - " 'totalBillableTokenCount': '96894'}}}" - ] - }, - "execution_count": 198, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "id": "3tXawW1p8E5-" + }, + "outputs": [], "source": [ "# Get the tuning job info.\n", "sft_tuning_job.to_dict()" @@ -1218,30 +781,11 @@ }, { "cell_type": "code", - "execution_count": 199, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "id": "19aQnN-k84d9", - "outputId": "5c7f8c4f-566f-4c12-ffc7-60b4340d03d3" - }, - "outputs": [ - { - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'projects/713601331534/locations/us-central1/tuningJobs/8356726629560483840'" - ] - }, - "execution_count": 199, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "id": "19aQnN-k84d9" + }, + "outputs": [], "source": [ "# Get the resource name of the tuning job\n", "sft_tuning_job_name = sft_tuning_job.resource_name\n", @@ -1270,24 +814,11 @@ }, { "cell_type": "code", - "execution_count": 200, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Njag_3cB86rH", - "outputId": "b1408519-3735-4aca-86aa-89b0da5699b1" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "CPU times: user 67.5 ms, sys: 4.98 ms, total: 72.5 ms\n", - "Wall time: 1.28 s\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "Njag_3cB86rH" + }, + "outputs": [], "source": [ "%%time\n", "# Wait for job completion\n", @@ -1297,30 +828,11 @@ }, { "cell_type": "code", - "execution_count": 201, - "metadata": { - "colab": { - "base_uri": 
"https://localhost:8080/", - "height": 35 - }, - "id": "dkx92RBdbf27", - "outputId": "0641746b-de08-4f9b-80cc-5afef558e7ea" - }, - "outputs": [ - { - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'projects/713601331534/locations/us-central1/models/1582035604160380928@1'" - ] - }, - "execution_count": 201, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "id": "dkx92RBdbf27" + }, + "outputs": [], "source": [ "# tuned model name\n", "tuned_model_name = sft_tuning_job.tuned_model_name\n", @@ -1329,30 +841,11 @@ }, { "cell_type": "code", - "execution_count": 202, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "id": "e09aB_9Ebf5c", - "outputId": "c8a028c1-da5f-4e7d-d9db-ecde9c5293e1" - }, - "outputs": [ - { - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'projects/713601331534/locations/us-central1/endpoints/5693131570647400448'" - ] - }, - "execution_count": 202, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "id": "e09aB_9Ebf5c" + }, + "outputs": [], "source": [ "# tuned model endpoint name\n", "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", @@ -1382,7 +875,7 @@ }, { "cell_type": "code", - "execution_count": 155, + "execution_count": null, "metadata": { "id": "DH0guHM---Jo" }, @@ -1399,7 +892,7 @@ }, { "cell_type": "code", - "execution_count": 156, + "execution_count": null, "metadata": { "id": "hggHQFIl_FXC" }, @@ -1413,7 +906,7 @@ }, { "cell_type": "code", - "execution_count": 157, + "execution_count": null, "metadata": { "id": "BdHKZdqG_bHf" }, @@ -1440,7 +933,7 @@ }, { "cell_type": "code", - "execution_count": 158, + "execution_count": null, "metadata": { "id": "_pDrlpA7_e9o" }, @@ -1453,56 +946,11 @@ }, { "cell_type": "code", - "execution_count": 
161, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 542 - }, - "id": "DL07j7u__iZx", - "outputId": "3cff463b-59f7-4db3-a884-e3a099583ab5" - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "
\n", - "
\n", - "\n", - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": { + "id": "DL07j7u__iZx" + }, + "outputs": [], "source": [ "# Plot the train and eval loss metrics using Plotly python library\n", "fig = make_subplots(\n", @@ -1536,39 +984,11 @@ }, { "cell_type": "code", - "execution_count": 162, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "dYygz5ph_icf", - "outputId": "f15d6aa8-bbf3-46ca-e919-e91760c9ba90" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "***Testing***\n", - "candidates {\n", - " content {\n", - " role: \"model\"\n", - " parts {\n", - " text: \"European Council\\n\\n\"\n", - " }\n", - " }\n", - " finish_reason: STOP\n", - " avg_logprobs: -0.11596920092900594\n", - "}\n", - "usage_metadata {\n", - " prompt_token_count: 290\n", - " candidates_token_count: 3\n", - " total_token_count: 293\n", - "}\n", - "\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "dYygz5ph_icf" + }, + "outputs": [], "source": [ "if True:\n", " tuned_genai_model = GenerativeModel(tuned_model_endpoint_name)\n", @@ -1584,20 +1004,9 @@ " print(\"Error:\", sft_tuning_job.error)" ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "AbTaqCMxNf18" - }, - "source": [ - "### Model usage and evaluation.\n", - "\n", - "Next you will evaluate the model to see how well it performs. You can also compare it to the benchmark." 
- ] - }, { "cell_type": "code", - "execution_count": 164, + "execution_count": null, "metadata": { "id": "W4YMNGuoDajB" }, @@ -1620,7 +1029,7 @@ }, { "cell_type": "code", - "execution_count": 165, + "execution_count": null, "metadata": { "id": "69FMuAeoDrm5" }, @@ -1637,24 +1046,11 @@ }, { "cell_type": "code", - "execution_count": 166, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "yj76Tu6ODalZ", - "outputId": "f5d76e1d-1fbf-4251-9c19-4f430a97ad0b" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "EM score: 0.0\n", - "F1 score: 0.2399679487179487\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "yj76Tu6ODalZ" + }, + "outputs": [], "source": [ "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", "print(f\"EM score: {em}\")\n", @@ -1666,7 +1062,11 @@ "metadata": { "id": "Vkb2qXljFYqX" }, - "source": [] + "source": [ + "\n", + "\n", + "\n" + ] } ], "metadata": { From faa02b7c46ab04a500b624b560db24b265196902 Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Thu, 10 Oct 2024 11:22:26 +0800 Subject: [PATCH 55/76] minor updates and fixes (#1231) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. - [X] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. - [X] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). 
- [X] Appropriate docs were updated (if necessary) Fixes # 🦕 --- ...upervised_finetuning_using_gemini_qa.ipynb | 1130 +++++++++++++++++ 1 file changed, 1130 insertions(+) create mode 100644 gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb diff --git a/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb new file mode 100644 index 0000000000..5456046578 --- /dev/null +++ b/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb @@ -0,0 +1,1130 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "source": [ + "# Supervised Fine Tuning with Gemini 1.5 Flash for Q&A\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ], + "metadata": { + "id": "ojoyvz6mH1Hv" + } + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Erwin Huizenga](https://github.com/erwinh85) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "## Overview\n", + "\n", + "**Gemini** is a family of generative AI models developed by Google DeepMind designed for multimodal use cases. The Gemini API gives you access to the various Gemini models, such as Gemini 1.5 Pro and Gemini 1.5 Flash.\n", + "This notebook demonstrates fine-tuning the Gemini 1.5 Flahs using the Vertex AI Supervised Tuning feature. Supervised Tuning allows you to use your own labeled training data to further refine the base model's capabilities toward your specific tasks.\n", + "Supervised Tuning uses labeled examples to tune a model. Each example demonstrates the output you want from your text model during inference.\n", + "First, ensure your training data is of high quality, well-labeled, and directly relevant to the target task. This is crucial as low-quality data can adversely affect the performance and introduce bias in the fine-tuned model.\n", + "Training: Experiment with different configurations to optimize the model's performance on the target task.\n", + "Evaluation:\n", + "Metric: Choose appropriate evaluation metrics that accurately reflect the success of the fine-tuned model for your specific task\n", + "Evaluation Set: Use a separate set of data to evaluate the model's performance\n", + "\n", + "\n", + "Refer to public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning) for more details.\n", + "\n", + "
\n", + "\n", + "Before running this notebook, ensure you have:\n", + "\n", + "- A Google Cloud project: Provide your project ID in the `PROJECT_ID` variable.\n", + "\n", + "- Authenticated your Colab environment: Run the authentication code block at the beginning.\n", + "\n", + "- Prepared training data (Test with your own data or use the one in the notebook): Data should be formatted in JSONL with prompts and corresponding completions." + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Costs\n", + "\n", + "This tutorial uses billable components of Google Cloud:\n", + "\n", + "* Vertex AI\n", + "* Cloud Storage\n", + "\n", + "Learn about [Vertex AI\n", + "pricing](https://cloud.google.com/vertex-ai/pricing), [Cloud Storage\n", + "pricing](https://cloud.google.com/storage/pricing), and use the [Pricing\n", + "Calculator](https://cloud.google.com/products/calculator/)\n", + "to generate a cost estimate based on your projected usage.\n", + "\n", + "To get an estimate of the number of tokens" + ], + "metadata": { + "id": "f7SS5pzuIA-1" + } + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "%pip install --upgrade --user --quiet google-cloud-aiplatform datasets" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "# Use the environment variable if the user doesn't provide Project ID.\n", + "import os\n", + "import vertexai\n", + "\n", + "PROJECT_ID = \"\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "# Vertex AI SDK\n", + "from google.cloud import aiplatform\n", + "from google.cloud.aiplatform.metadata import context\n", + "from google.cloud.aiplatform.metadata import utils as metadata_utils\n", + "from vertexai.generative_models import (\n", + " GenerationConfig,\n", + " GenerativeModel,\n", + " HarmBlockThreshold,\n", + " HarmCategory,\n", + ")\n", + "from vertexai.preview.tuning import sft\n", + "\n", + "# Vertex AI SDK\n", + "from sklearn.metrics import f1_score\n", + "from sklearn.feature_extraction.text import TfidfVectorizer\n", + "import pandas as pd\n", + "import array\n", + "import time\n", + "from datasets import load_dataset\n", + "import numpy as np\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "import json" + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Data\n", + "\n", + "#### SQuAD dataset\n", + "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding 
reading passage, or the question might be unanswerable.\n", + "\n", + "You can fine more information on the SQuAD [github page](https://rajpurkar.github.io/SQuAD-explorer/)**bold text**" + ], + "metadata": { + "id": "6bBZa2I-c-x8" + } + }, + { + "cell_type": "markdown", + "source": [ + "First update the `BUCKET_NAME` parameter below. You can either use an existing bucket or create a new one." + ], + "metadata": { + "id": "KhebDJjRKePL" + } + }, + { + "cell_type": "code", + "source": [ + "# Provide a bucket name\n", + "BUCKET_NAME = \"\" # @param {type:\"string\"}\n", + "BUCKET_URI = f\"gs://{BUCKET_NAME}\"\n", + "print(BUCKET_URI)" + ], + "metadata": { + "id": "lit30Cktbfvo" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Only run the code below if you want to create a new Google Cloud Storage bucket." + ], + "metadata": { + "id": "ed-G-9cyKmPY" + } + }, + { + "cell_type": "code", + "source": [ + "# ! gsutil mb -l {LOCATION} -p {PROJECT_ID} {BUCKET_URI}" + ], + "metadata": { + "id": "0UJ8S9YFA1pZ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Next you will copy the data into your bucket." + ], + "metadata": { + "id": "izjwF63tLLEq" + } + }, + { + "cell_type": "code", + "source": [ + "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_test.csv .\n", + "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_train.csv .\n", + "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_validation.csv ." + ], + "metadata": { + "id": "wjvcxx_sA3xP" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Baseline\n", + "\n", + "Next you will prepare some test data that you will use to establish a baseline. This means evaluating your chosen model on a representative sample of your dataset before any fine-tuning. 
A baseline allows you to quantify the improvements achieved through fine-tuning." + ], + "metadata": { + "id": "3F10LuZeL3kt" + } + }, + { + "cell_type": "code", + "source": [ + "test_df = pd.read_csv('squad_test.csv')\n", + "test_df.head(1)" + ], + "metadata": { + "id": "LkOmXpegA8CW" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "You will need to do some dataset preperations. We will add a system instruction to the dataset:\n", + "\n", + "`SystemInstruct`: System instructions are a set of instructions that the model processes before it processes prompts. We recommend that you use system instructions to tell the model how you want it to behave and respond to prompts.\n", + "\n", + "We will also combine the `context` and `question`." + ], + "metadata": { + "id": "mrLxcVVcMsNO" + } + }, + { + "cell_type": "code", + "source": [ + "systemInstruct = \"Answer the question based on the context\"" + ], + "metadata": { + "id": "c0pgJycOekZ3" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# combine the systeminstruct + context + question into one column.\n", + "row_dataset = 6\n", + "\n", + "test_df[\"input_question\"] = systemInstruct + \"\\n\" + \"Context: \" + test_df[\"context\"] + \"\\n\" + \"Question: \" + test_df[\"question\"]\n", + "test_question = test_df[\"input_question\"].iloc[row_dataset]\n", + "print(test_question)" + ], + "metadata": { + "id": "N_u3VzUMsyqj" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Next, set the model that you will use. In this example you will use `gemini-1.5-flash-002`. 
A multimodal model that is designed for high-volume, cost-effective applications, and which delivers speed and efficiency to build fast, lower-cost applications that don't compromise on quality.\n", + "\n" + ], + "metadata": { + "id": "FSxYYqMGWrmj" + } + }, + { + "cell_type": "code", + "source": [ + "base_model = \"gemini-1.5-flash-002\"\n", + "generation_model = GenerativeModel(base_model)" + ], + "metadata": { + "id": "t-5X4goiqqBQ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Next lets take a question and get a prediction from Gemini that we can compare to the actual answer." + ], + "metadata": { + "id": "wyscyIenW4WZ" + } + }, + { + "cell_type": "code", + "source": [ + "y_true = test_df[\"answers\"].values\n", + "y_pred_question = test_df[\"question\"].values" + ], + "metadata": { + "id": "ejjhfynQWc0k" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def get_predictions(question: str) -> str:\n", + " \"\"\"Generates predictions for a given test question.\n", + "\n", + " Args:\n", + " test_question: The question to generate predictions for.\n", + "\n", + " Returns:\n", + " The generated prediction text.\n", + " \"\"\"\n", + "\n", + " prompt = f\"{question}\"\n", + "\n", + " generation_config = GenerationConfig(\n", + " temperature=0.1)\n", + "\n", + " response = generation_model.generate_content(\n", + " contents=prompt, generation_config=generation_config\n", + " ).text\n", + "\n", + " return response" + ], + "metadata": { + "id": "cXencUYc6YAE" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "test_answer = test_df[\"answers\"].iloc[row_dataset]\n", + "\n", + "response = get_predictions(test_question)\n", + "\n", + "print(f\"Gemini response: {response}\")\n", + "print(f\"Actual answer: {test_answer}\")" + ], + "metadata": { + "id": "gKa0wLooa3Is" + }, + "execution_count": null, + "outputs": [] + }, + { + 
"cell_type": "markdown", + "source": [ + "You can see that both answers are correct, but the response from Gemini is more lengthy. However, answers in the SQuAD dataset are typically concise and clear.\n", + "\n", + "Fine-tuning is a great way to control the type of output your use case requires. In this instance, you would want the model to provide short, clear answers.\n", + "\n", + "Next, let's check if each dataset has an equal number of examples." + ], + "metadata": { + "id": "OGRJTHKrdujw" + } + }, + { + "cell_type": "code", + "source": [ + "y_pred = test_df[\"question\"].values\n", + "\n", + "num_strings_pred = np.sum([isinstance(item, str) for item in y_pred])\n", + "print(f\"Number of strings in y_pred: {num_strings_pred}\")\n", + "\n", + "num_strings_true = np.sum([isinstance(item, str) for item in y_true])\n", + "print(f\"Number of strings in y_true: {num_strings_true}\")" + ], + "metadata": { + "id": "dCe0CUsi5E-Y" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Next lest establish a baseline using evaluation metrics.\n", + "\n", + "Evaluating the performance of a Question Answering (QA) system requires specific metrics. Two commonly used metrics are Exact Match (EM) and F1 score.\n", + "\n", + "EM is a strict measure that only considers an answer correct if it perfectly matches the ground truth, even down to the punctuation. It's a binary metric - either 1 for a perfect match or 0 otherwise. This makes it sensitive to minor variations in phrasing.\n", + "\n", + "F1 score is more flexible. It considers the overlap between the predicted answer and the true answer in terms of individual words or tokens. It calculates the harmonic mean of precision (proportion of correctly predicted words out of all predicted words) and recall (proportion of correctly predicted words out of all true answer words). 
This allows for partial credit and is less sensitive to minor wording differences.\n", + "\n", + "In practice, EM is useful when exact wording is crucial, while F1 is more suitable when evaluating the overall understanding and semantic accuracy of the QA system. Often, both metrics are used together to provide a comprehensive evaluation." + ], + "metadata": { + "id": "hvi7m8pKE8WB" + } + }, + { + "cell_type": "code", + "source": [ + "def calculate_em_and_f1_for_text_arrays(y_true, y_pred, average='weighted'):\n", + " \"\"\"\n", + " Calculates the Exact Match (EM) and F1 score for arrays of text\n", + " using word-level comparisons.\n", + "\n", + " Args:\n", + " y_true: An array of ground truth strings.\n", + " y_pred: An array of predicted strings.\n", + " average: The averaging method to use for F1 score.\n", + "\n", + " Returns:\n", + " A tuple containing the EM score and the F1 score.\n", + " \"\"\"\n", + "\n", + " em = np.mean([t == p for t, p in zip(y_true, y_pred)])\n", + "\n", + " # Use TF-IDF to convert strings to numerical vectors\n", + " vectorizer = TfidfVectorizer()\n", + " all_text = np.concatenate((y_true, y_pred))\n", + " vectorizer.fit(all_text)\n", + " y_true_vec = vectorizer.transform(y_true)\n", + " y_pred_vec = vectorizer.transform(y_pred)\n", + "\n", + " # Calculate F1 score based on common words (non-zero elements)\n", + " y_true_class = (y_true_vec > 0).toarray().astype(int)\n", + " y_pred_class = (y_pred_vec > 0).toarray().astype(int)\n", + "\n", + " f1 = f1_score(y_true_class, y_pred_class, average=average)\n", + "\n", + " return em, f1\n" + ], + "metadata": { + "id": "XcgEpTU55FFc" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", + "print(f\"EM score: {em}\")\n", + "print(f\"F1 score: {f1}\")" + ], + "metadata": { + "id": "rhDTq9p_GSBP" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + 
"source": [ + "### Prepare the data for fine-tuning\n", + "\n", + "To optimize the tuning process for a foundation model, ensure your dataset includes examples that reflect the desired task. Structure your training data in a text-to-text format, where each record in the dataset pairs an input text (or prompt) with its corresponding expected output. This supervised tuning approach uses the dataset to effectively teach the model the specific behavior or task you need it to perform, by providing numerous illustrative examples.\n", + "\n", + "The size of your dataset will vary depending on the complexity of the task, but as a general rule, the more examples you include, the better the model's performance.\n", + "\n", + "Dataset Format\n", + "Your training data should be structured in a JSONL file and stored at a Google Cloud Storage (GCS) URI. Each line in the JSONL file must adhere to the following schema:\n", + "\n", + "A `contents` array containing objects that define:\n", + "- A `role` (\"user\" for user input or \"model\" for model output)\n", + "- `parts` containing the input data.\n", + "\n", + "```\n", + "{\n", + " \"contents\":[\n", + " {\n", + " \"role\":\"user\", # This indicate input content\n", + " \"parts\":[\n", + " {\n", + " \"text\":\"How are you?\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"role\":\"model\", # This indicate target content\n", + " \"parts\":[ # text only\n", + " {\n", + " \"text\":\"I am good, thank you!\"\n", + " }\n", + " ]\n", + " }\n", + " # ... repeat \"user\", \"model\" for multi turns.\n", + " ]\n", + "}\n", + "```\n", + "\n", + "Refer to the public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-prepare#about-datasets) for more details." 
+ ], + "metadata": { + "id": "22DfexbNfUHm" + } + }, + { + "cell_type": "code", + "source": [ + "# combine the systeminstruct + context + question into one column.\n", + "train_df = pd.read_csv('squad_train.csv')\n", + "validation_df = pd.read_csv('squad_validation.csv')\n", + "\n", + "train_df[\"input_question\"] = systemInstruct + \"Context: \" + train_df[\"context\"] + \"Question: \" + train_df[\"question\"]\n", + "validation_df[\"input_question\"] = systemInstruct + \"Context: \" + validation_df[\"context\"] + \"Question: \" + validation_df[\"question\"]" + ], + "metadata": { + "id": "4DqrQp4cLqRy" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def df_to_jsonl(df, output_file):\n", + " \"\"\"Converts a Pandas DataFrame to JSONL format and saves it to a file.\n", + "\n", + " Args:\n", + " df: The DataFrame to convert.\n", + " output_file: The name of the output file.\n", + " \"\"\"\n", + "\n", + " with open(output_file, 'w') as f:\n", + " for row in df.itertuples(index=False):\n", + " jsonl_obj = {\n", + " \"systemInstruction\": {\"parts\": [{\"text\": \"Answer the question based on the provided context.\"}]},\n", + " \"contents\": [\n", + " {\n", + " \"role\": \"user\",\n", + " \"parts\": [{\"text\": f\"Context: {row.context}\\n\\nQuestion: {row.question}\"}]\n", + " },\n", + " {\"role\": \"model\", \"parts\": [{\"text\": row.answers}]},\n", + " ]\n", + " }\n", + " f.write(json.dumps(jsonl_obj) + '\\n')\n", + "\n", + "# Process the DataFrames\n", + "df_to_jsonl(train_df, 'squad_train.jsonl')\n", + "df_to_jsonl(validation_df, 'squad_validation.jsonl')\n", + "\n", + "print(f\"JSONL data written to squad_train.jsonl\")\n", + "print(f\"JSONL data written to squad_validation.jsonl\")" + ], + "metadata": { + "id": "Pmzyz1migvHN" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Next you will copy the files into your Google Cloud bucket" + ], + "metadata": { + "id": 
"5OQv-ZMpJDhi" + } + }, + { + "cell_type": "code", + "source": [ + "!gsutil cp ./squad_train.jsonl {BUCKET_URI}\n", + "!gsutil cp ./squad_validation.jsonl {BUCKET_URI}" + ], + "metadata": { + "id": "O5k1jYJ10IeW" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Start fine-tuning job\n", + "Next you can start the fine-tuning job.\n", + "\n", + "- `source_model`: Specifies the base Gemini model version you want to fine-tune.\n", + " - `train_dataset`: Path to your training data in JSONL format.\n", + "\n", + " *Optional parameters*\n", + " - `validation_dataset`: If provided, this data is used to evaluate the model during tuning.\n", + " - `tuned_model_display_name`: Display name for the tuned model.\n", + " - `epochs`: The number of training epochs to run.\n", + " - `learning_rate_multiplier`: A value to scale the learning rate during training.\n", + " - `adapter_size` : Gemini 1.5 Flash supports Adapter length [1, 4], default value is 4.\n", + "\n", + " **Important**: The default hyperparameter settings are optimized for optimal performance based on rigorous testing and are recommended for initial use. 
Users may customize these parameters to address specific performance requirements.**" + ], + "metadata": { + "id": "UAHMYgFJJHjm" + } + }, + { + "cell_type": "code", + "source": [ + "tuned_model_display_name = \"fine-tuning-gemini-flash-qa-v01\" # @param {type:\"string\"}\n", + "\n", + "sft_tuning_job = sft.train(\n", + " source_model=base_model,\n", + " train_dataset=f\"\"\"{BUCKET_URI}/squad_train.jsonl\"\"\",\n", + " # # Optional:\n", + " validation_dataset=f\"\"\"{BUCKET_URI}/squad_validation.jsonl\"\"\",\n", + " tuned_model_display_name=tuned_model_display_name,\n", + ")" + ], + "metadata": { + "id": "qj-LjQ5Vbf1E" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Get the tuning job info.\n", + "sft_tuning_job.to_dict()" + ], + "metadata": { + "id": "3tXawW1p8E5-" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Get the resource name of the tuning job\n", + "sft_tuning_job_name = sft_tuning_job.resource_name\n", + "sft_tuning_job_name" + ], + "metadata": { + "id": "19aQnN-k84d9" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Important:** Tuning time depends on several factors, such as training data size, number of epochs, learning rate multiplier, etc." + ], + "metadata": { + "id": "UKo8cwF2KVM5" + } + }, + { + "cell_type": "markdown", + "source": [ + "
\n", + "⚠️ It will take ~30 mins for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", + "
" + ], + "metadata": { + "id": "8NiZnPkIKcwm" + } + }, + { + "cell_type": "code", + "source": [ + "%%time\n", + "# Wait for job completion\n", + "while not sft_tuning_job.refresh().has_ended:\n", + " time.sleep(60)" + ], + "metadata": { + "id": "Njag_3cB86rH" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# tuned model name\n", + "tuned_model_name = sft_tuning_job.tuned_model_name\n", + "tuned_model_name" + ], + "metadata": { + "id": "dkx92RBdbf27" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# tuned model endpoint name\n", + "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", + "tuned_model_endpoint_name" + ], + "metadata": { + "id": "e09aB_9Ebf5c" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "#### Model tuning metrics\n", + "\n", + "- `/train_total_loss`: Loss for the tuning dataset at a training step.\n", + "- `/train_fraction_of_correct_next_step_preds`: The token accuracy at a training step. A single prediction consists of a sequence of tokens. This metric measures the accuracy of the predicted tokens when compared to the ground truth in the tuning dataset.\n", + "- `/train_num_predictions`: Number of predicted tokens at a training step\n", + "\n", + "#### Model evaluation metrics:\n", + "\n", + "- `/eval_total_loss`: Loss for the evaluation dataset at an evaluation step.\n", + "- `/eval_fraction_of_correct_next_step_preds`: The token accuracy at an evaluation step. A single prediction consists of a sequence of tokens. This metric measures the accuracy of the predicted tokens when compared to the ground truth in the evaluation dataset.\n", + "- `/eval_num_predictions`: Number of predicted tokens at an evaluation step.\n", + "\n", + "The metrics visualizations are available after the model tuning job completes. 
If you don't specify a validation dataset when you create the tuning job, only the visualizations for the tuning metrics are available." + ], + "metadata": { + "id": "gV1ukBznKmlN" + } + }, + { + "cell_type": "code", + "source": [ + "# Get resource name from tuning job.\n", + "experiment_name = sft_tuning_job.experiment.resource_name\n", + "experiment_name" + ], + "metadata": { + "id": "IHVU4XP2aOFE" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Locate Vertex AI Experiment and Vertex AI Experiment Run\n", + "experiment = aiplatform.Experiment(experiment_name=experiment_name)\n", + "filter_str = metadata_utils._make_filter_string(\n", + " schema_title=\"system.ExperimentRun\",\n", + " parent_contexts=[experiment.resource_name],\n", + ")\n", + "experiment_run = context.Context.list(filter_str)[0]" + ], + "metadata": { + "id": "DH0guHM---Jo" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Read data from Tensorboard\n", + "tensorboard_run_name = f\"{experiment.get_backing_tensorboard_resource().resource_name}/experiments/{experiment.name}/runs/{experiment_run.name.replace(experiment.name, '')[1:]}\"\n", + "tensorboard_run = aiplatform.TensorboardRun(tensorboard_run_name)\n", + "metrics = tensorboard_run.read_time_series_data()" + ], + "metadata": { + "id": "hggHQFIl_FXC" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def get_metrics(metric: str = \"/train_total_loss\"):\n", + " \"\"\"\n", + " Get metrics from Tensorboard.\n", + "\n", + " Args:\n", + " metric: metric name, eg. 
/train_total_loss or /eval_total_loss.\n", + " Returns:\n", + " steps: list of steps.\n", + " steps_loss: list of loss values.\n", + " \"\"\"\n", + " loss_values = metrics[metric].values\n", + " steps_loss = []\n", + " steps = []\n", + " for loss in loss_values:\n", + " steps_loss.append(loss.scalar.value)\n", + " steps.append(loss.step)\n", + " return steps, steps_loss" + ], + "metadata": { + "id": "BdHKZdqG_bHf" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Get Train and Eval Loss\n", + "train_loss = get_metrics(metric=\"/train_total_loss\")\n", + "eval_loss = get_metrics(metric=\"/eval_total_loss\")" + ], + "metadata": { + "id": "_pDrlpA7_e9o" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Plot the train and eval loss metrics using Plotly python library\n", + "fig = make_subplots(\n", + " rows=1, cols=2, shared_xaxes=True, subplot_titles=(\"Train Loss\", \"Eval Loss\")\n", + ")\n", + "\n", + "# Add traces\n", + "fig.add_trace(\n", + " go.Scatter(x=train_loss[0], y=train_loss[1], name=\"Train Loss\", mode=\"lines\"),\n", + " row=1,\n", + " col=1,\n", + ")\n", + "fig.add_trace(\n", + " go.Scatter(x=eval_loss[0], y=eval_loss[1], name=\"Eval Loss\", mode=\"lines\"),\n", + " row=1,\n", + " col=2,\n", + ")\n", + "\n", + "# Add figure title\n", + "fig.update_layout(title=\"Train and Eval Loss\", xaxis_title=\"Steps\", yaxis_title=\"Loss\")\n", + "\n", + "# Set x-axis title\n", + "fig.update_xaxes(title_text=\"Steps\")\n", + "\n", + "# Set y-axes titles\n", + "fig.update_yaxes(title_text=\"Loss\")\n", + "\n", + "# Show plot\n", + "fig.show()" + ], + "metadata": { + "id": "DL07j7u__iZx" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Use the fine-tuned model and evaluation" + ], + "metadata": { + "id": "pivmh4Lwbgy1" + } + }, + { + "cell_type": "code", + "source": [ + "prompt = \"\"\"\n", + "Answer the question 
based on the context\n", + "\n", + "Context: In the 1840s and 50s, there were attempts to overcome this problem by means of various patent valve gears with a separate, variable cutoff expansion valve riding on the back of the main slide valve; the latter usually had fixed or limited cutoff.\n", + "The combined setup gave a fair approximation of the ideal events, at the expense of increased friction and wear, and the mechanism tended to be complicated.\n", + "The usual compromise solution has been to provide lap by lengthening rubbing surfaces of the valve in such a way as to overlap the port on the admission side, with the effect that the exhaust side remains open for a longer period after cut-off on the admission side has occurred.\n", + "This expedient has since been generally considered satisfactory for most purposes and makes possible the use of the simpler Stephenson, Joy and Walschaerts motions.\n", + "Corliss, and later, poppet valve gears had separate admission and exhaust valves driven by trip mechanisms or cams profiled so as to give ideal events; most of these gears never succeeded outside of the stationary marketplace due to various other issues including leakage and more delicate mechanisms.\n", + "\n", + "Question: How is lap provided by overlapping the admission side port?\n", + "\"\"\"" + ], + "metadata": { + "id": "qO6ln4teagw1" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "if True:\n", + " tuned_genai_model = GenerativeModel(tuned_model_endpoint_name)\n", + " # Test with the loaded model.\n", + " print(\"***Testing***\")\n", + " print(\n", + " tuned_genai_model.generate_content(\n", + " contents=prompt\n", + " )\n", + " )\n", + "else:\n", + " print(\"State:\", sft_tuning_job.state)\n", + " print(\"Error:\", sft_tuning_job.error)" + ], + "metadata": { + "id": "dYygz5ph_icf" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "y_true = 
test_df[\"answers\"].values\n", + "\n", + "def get_predictions(test_question):\n", + "\n", + " prompt = f\"\"\"{test_question}\"\"\"\n", + "\n", + " generation_config = GenerationConfig(\n", + " temperature=0.1,\n", + " )\n", + "\n", + " response = tuned_genai_model.generate_content(contents=prompt, generation_config=generation_config).text\n", + "\n", + " return response" + ], + "metadata": { + "id": "W4YMNGuoDajB" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "y_pred = []\n", + "y_pred_question = test_df[\"question\"].values\n", + "\n", + "for i in y_pred_question:\n", + "\n", + " prediction = get_predictions(i)\n", + " y_pred.append(prediction)" + ], + "metadata": { + "id": "69FMuAeoDrm5" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", + "print(f\"EM score: {em}\")\n", + "print(f\"F1 score: {f1}\")" + ], + "metadata": { + "id": "yj76Tu6ODalZ" + }, + "execution_count": null, + "outputs": [] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From dfe6cc6fe18ab4455fdae72c24fdd537fd50e83f Mon Sep 17 00:00:00 2001 From: Erwin Huizenga Date: Thu, 10 Oct 2024 11:29:22 +0800 Subject: [PATCH 56/76] minor updates (#1232) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [X] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [X] You are listed as the author in your notebook or README file. 
- [X] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [X] Make your Pull Request title in the specification. - [X] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [X] Appropriate docs were updated (if necessary) Fixes # 🦕 --- ...sed_finetuning_using_gemini_qa_ipynb.ipynb | 1083 ----------------- 1 file changed, 1083 deletions(-) delete mode 100644 gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb diff --git a/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb deleted file mode 100644 index 85f96df281..0000000000 --- a/gemini/tuning/supervised_finetuning_using_gemini_qa_ipynb.ipynb +++ /dev/null @@ -1,1083 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ur8xi4C7S06n" - }, - "outputs": [], - "source": [ - "# Copyright 2024 Google LLC\n", - "#\n", - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ojoyvz6mH1Hv" - }, - "source": [ - "# Supervised Fine Tuning with Gemini 1.5 Flash for Q&A\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \"Google
Open in Colab\n", - "
\n", - "
\n", - " \n", - " \"Google
Open in Colab Enterprise\n", - "
\n", - "
\n", - " \n", - " \"Vertex
Open in Workbench\n", - "
\n", - "
\n", - " \n", - " \"GitHub
View on GitHub\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "84f0f73a0f76" - }, - "source": [ - "| | |\n", - "|-|-|\n", - "| Author(s) | [Erwin Huizenga](https://github.com/erwinh85) |" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JAPoU8Sm5E6e" - }, - "source": [ - "## Overview\n", - "\n", - "**Gemini** is a family of generative AI models developed by Google DeepMind designed for multimodal use cases. The Gemini API gives you access to the various Gemini models, such as Gemini 1.5 Pro and Gemini 1.5 Flash.\n", - "This notebook demonstrates fine-tuning the Gemini 1.5 Flahs using the Vertex AI Supervised Tuning feature. Supervised Tuning allows you to use your own labeled training data to further refine the base model's capabilities toward your specific tasks.\n", - "Supervised Tuning uses labeled examples to tune a model. Each example demonstrates the output you want from your text model during inference.\n", - "First, ensure your training data is of high quality, well-labeled, and directly relevant to the target task. This is crucial as low-quality data can adversely affect the performance and introduce bias in the fine-tuned model.\n", - "Training: Experiment with different configurations to optimize the model's performance on the target task.\n", - "Evaluation:\n", - "Metric: Choose appropriate evaluation metrics that accurately reflect the success of the fine-tuned model for your specific task\n", - "Evaluation Set: Use a separate set of data to evaluate the model's performance\n", - "\n", - "\n", - "Refer to public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning) for more details.\n", - "\n", - "
\n", - "\n", - "Before running this notebook, ensure you have:\n", - "\n", - "- A Google Cloud project: Provide your project ID in the `PROJECT_ID` variable.\n", - "\n", - "- Authenticated your Colab environment: Run the authentication code block at the beginning.\n", - "\n", - "- Prepare your training data. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "f7SS5pzuIA-1" - }, - "source": [ - "### Costs\n", - "\n", - "This tutorial uses billable components of Google Cloud:\n", - "\n", - "* Vertex AI\n", - "* Cloud Storage\n", - "\n", - "Learn about [Vertex AI\n", - "pricing](https://cloud.google.com/vertex-ai/pricing), [Cloud Storage\n", - "pricing](https://cloud.google.com/storage/pricing), and use the [Pricing\n", - "Calculator](https://cloud.google.com/products/calculator/)\n", - "to generate a cost estimate based on your projected usage.\n", - "\n", - "To get an estimate of the number of tokens" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "61RBz8LLbxCR" - }, - "source": [ - "## Get started" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "No17Cw5hgx12" - }, - "source": [ - "### Install Vertex AI SDK and other required packages\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "tFy3H3aPgx12" - }, - "outputs": [], - "source": [ - "%pip install --upgrade --user --quiet google-cloud-aiplatform datasets" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "R5Xep4W9lq-Z" - }, - "source": [ - "### Restart runtime\n", - "\n", - "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", - "\n", - "The restart might take a minute or longer. After it's restarted, continue to the next step." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XRvKdaPDTznN" - }, - "outputs": [], - "source": [ - "import IPython\n", - "\n", - "app = IPython.Application.instance()\n", - "app.kernel.do_shutdown(True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SbmM4z7FOBpM" - }, - "source": [ - "
\n", - "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", - "
\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "dmWOrTJ3gx13" - }, - "source": [ - "### Authenticate your notebook environment (Colab only)\n", - "\n", - "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NyKGtVQjgx13" - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "if \"google.colab\" in sys.modules:\n", - " from google.colab import auth\n", - "\n", - " auth.authenticate_user()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DF4l8DTdWgPY" - }, - "source": [ - "### Set Google Cloud project information and initialize Vertex AI SDK\n", - "\n", - "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", - "\n", - "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Nqwi-5ufWp_B" - }, - "outputs": [], - "source": [ - "# Use the environment variable if the user doesn't provide Project ID.\n", - "import os\n", - "import vertexai\n", - "\n", - "PROJECT_ID = \"\" # @param {type:\"string\", isTemplate: true}\n", - "if PROJECT_ID == \"[your-project-id]\":\n", - " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", - "\n", - "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", - "\n", - "vertexai.init(project=PROJECT_ID, location=LOCATION)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5303c05f7aa6" - }, - "source": [ - "### Import libraries" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "6fc324893334" - }, - "outputs": [], - "source": [ - "# Vertex AI SDK\n", - "from google.cloud import aiplatform\n", - "from google.cloud.aiplatform.metadata import context\n", - "from google.cloud.aiplatform.metadata import utils as metadata_utils\n", - "from vertexai.generative_models import (\n", - " GenerationConfig,\n", - " GenerativeModel,\n", - " HarmBlockThreshold,\n", - " HarmCategory,\n", - ")\n", - "from vertexai.preview.tuning import sft\n", - "\n", - "# Vertex AI SDK\n", - "from sklearn.metrics import f1_score\n", - "from sklearn.feature_extraction.text import TfidfVectorizer\n", - "import pandas as pd\n", - "import array\n", - "import time\n", - "from datasets import load_dataset\n", - "import numpy as np\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6bBZa2I-c-x8" - }, - "source": [ - "### Data\n", - "\n", - "#### SQuAD dataset\n", - "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or 
span, from the corresponding reading passage, or the question might be unanswerable.\n", - "\n", - "You can fine more information on the SQuAD [github page](https://rajpurkar.github.io/SQuAD-explorer/)**bold text**" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "KhebDJjRKePL" - }, - "source": [ - "First update the `BUCKET_NAME` parameter below. You can either use an existing bucket or create a new one." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "lit30Cktbfvo" - }, - "outputs": [], - "source": [ - "# Provide a bucket name\n", - "BUCKET_NAME = \"tuning-demo-erwinh/gemini-tuning\" # @param {type:\"string\"}\n", - "BUCKET_URI = f\"gs://{BUCKET_NAME}\"\n", - "print(BUCKET_URI)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ed-G-9cyKmPY" - }, - "source": [ - "Only run the code below if you want to create a new Google Cloud Storage bucket." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "0UJ8S9YFA1pZ" - }, - "outputs": [], - "source": [ - "# ! gsutil mb -l {LOCATION} -p {PROJECT_ID} {BUCKET_URI}" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "izjwF63tLLEq" - }, - "source": [ - "Next you will copy the data into your bucket." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wjvcxx_sA3xP" - }, - "outputs": [], - "source": [ - "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_test.csv .\n", - "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_train.csv .\n", - "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_validation.csv ." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3F10LuZeL3kt" - }, - "source": [ - "### Baseline\n", - "\n", - "Next you will prepare some test data that you will use to establish a baseline. This means evaluating your chosen model on a representative sample of your dataset before any fine-tuning. 
A baseline allows you to quantify the improvements achieved through fine-tuning." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LkOmXpegA8CW" - }, - "outputs": [], - "source": [ - "test_df = pd.read_csv('squad_test.csv')\n", - "test_df.head(1)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "mrLxcVVcMsNO" - }, - "source": [ - "You will need to do some dataset preperations. We will add a system instruction to the dataset:\n", - "\n", - "`SystemInstruct`: System instructions are a set of instructions that the model processes before it processes prompts. We recommend that you use system instructions to tell the model how you want it to behave and respond to prompts.\n", - "\n", - "We will also combine the `context` and `question`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "c0pgJycOekZ3" - }, - "outputs": [], - "source": [ - "systemInstruct = \"Answer the question based on the context\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "N_u3VzUMsyqj" - }, - "outputs": [], - "source": [ - "# combine the systeminstruct + context + question into one column.\n", - "row_dataset = 6\n", - "\n", - "test_df[\"input_question\"] = systemInstruct + \"\\n\" + \"Context: \" + test_df[\"context\"] + \"\\n\" + \"Question: \" + test_df[\"question\"]\n", - "test_question = test_df[\"input_question\"].iloc[row_dataset]\n", - "print(test_question)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FSxYYqMGWrmj" - }, - "source": [ - "Next, set the model that you will use. In this example you will use `gemini-1.5-flash-002`. 
A multimodal model that is designed for high-volume, cost-effective applications, and which delivers speed and efficiency to build fast, lower-cost applications that don't compromise on quality.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "t-5X4goiqqBQ" - }, - "outputs": [], - "source": [ - "base_model = \"gemini-1.5-flash-002\"\n", - "generation_model = GenerativeModel(base_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wyscyIenW4WZ" - }, - "source": [ - "Next lets take a question and get a prediction from Gemini that we can compare to the actual answer." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "cXencUYc6YAE" - }, - "outputs": [], - "source": [ - "def get_predictions(question: str) -> str:\n", - " \"\"\"Generates predictions for a given test question.\n", - "\n", - " Args:\n", - " test_question: The question to generate predictions for.\n", - "\n", - " Returns:\n", - " The generated prediction text.\n", - " \"\"\"\n", - "\n", - " prompt = f\"{question}\"\n", - "\n", - " generation_config = GenerationConfig(\n", - " temperature=0.1)\n", - "\n", - " response = generation_model.generate_content(\n", - " contents=prompt, generation_config=generation_config\n", - " ).text\n", - "\n", - " return response" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "gKa0wLooa3Is" - }, - "outputs": [], - "source": [ - "test_answer = test_df[\"answers\"].iloc[row_dataset]\n", - "\n", - "response = get_predictions(test_question)\n", - "\n", - "print(f\"Gemini response: {response}\")\n", - "print(f\"Actual answer: {test_answer}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OGRJTHKrdujw" - }, - "source": [ - "You can see that both answers are correct, but the response from Gemini is more lengthy. 
However, answers in the SQuAD dataset are typically concise and clear.\n", - "\n", - "Fine-tuning is a great way to control the type of output your use case requires. In this instance, you would want the model to provide short, clear answers.\n", - "\n", - "Next, let's check if each dataset has an equal number of examples." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dCe0CUsi5E-Y" - }, - "outputs": [], - "source": [ - "num_strings_pred = np.sum([isinstance(item, str) for item in y_pred])\n", - "print(f\"Number of strings in y_pred: {num_strings_pred}\")\n", - "\n", - "num_strings_true = np.sum([isinstance(item, str) for item in y_true])\n", - "print(f\"Number of strings in y_true: {num_strings_true}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hvi7m8pKE8WB" - }, - "source": [ - "Next lest establish a baseline using evaluation metrics.\n", - "\n", - "Evaluating the performance of a Question Answering (QA) system requires specific metrics. Two commonly used metrics are Exact Match (EM) and F1 score.\n", - "\n", - "EM is a strict measure that only considers an answer correct if it perfectly matches the ground truth, even down to the punctuation. It's a binary metric - either 1 for a perfect match or 0 otherwise. This makes it sensitive to minor variations in phrasing.\n", - "\n", - "F1 score is more flexible. It considers the overlap between the predicted answer and the true answer in terms of individual words or tokens. It calculates the harmonic mean of precision (proportion of correctly predicted words out of all predicted words) and recall (proportion of correctly predicted words out of all true answer words). This allows for partial credit and is less sensitive to minor wording differences.\n", - "\n", - "In practice, EM is useful when exact wording is crucial, while F1 is more suitable when evaluating the overall understanding and semantic accuracy of the QA system. 
Often, both metrics are used together to provide a comprehensive evaluation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XcgEpTU55FFc" - }, - "outputs": [], - "source": [ - "def calculate_em_and_f1_for_text_arrays(y_true, y_pred, average='weighted'):\n", - " \"\"\"\n", - " Calculates the Exact Match (EM) and F1 score for arrays of text\n", - " using word-level comparisons.\n", - "\n", - " Args:\n", - " y_true: An array of ground truth strings.\n", - " y_pred: An array of predicted strings.\n", - " average: The averaging method to use for F1 score.\n", - "\n", - " Returns:\n", - " A tuple containing the EM score and the F1 score.\n", - " \"\"\"\n", - "\n", - " em = np.mean([t == p for t, p in zip(y_true, y_pred)])\n", - "\n", - " # Use TF-IDF to convert strings to numerical vectors\n", - " vectorizer = TfidfVectorizer()\n", - " all_text = np.concatenate((y_true, y_pred))\n", - " vectorizer.fit(all_text)\n", - " y_true_vec = vectorizer.transform(y_true)\n", - " y_pred_vec = vectorizer.transform(y_pred)\n", - "\n", - " # Calculate F1 score based on common words (non-zero elements)\n", - " y_true_class = (y_true_vec > 0).toarray().astype(int)\n", - " y_pred_class = (y_pred_vec > 0).toarray().astype(int)\n", - "\n", - " f1 = f1_score(y_true_class, y_pred_class, average=average)\n", - "\n", - " return em, f1\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rhDTq9p_GSBP" - }, - "outputs": [], - "source": [ - "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", - "print(f\"EM score: {em}\")\n", - "print(f\"F1 score: {f1}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "22DfexbNfUHm" - }, - "source": [ - "### Prepare the data for fine-tuning\n", - "\n", - "To optimize the tuning process for a foundation model, ensure your dataset includes examples that reflect the desired task. 
Structure your training data in a text-to-text format, where each record in the dataset pairs an input text (or prompt) with its corresponding expected output. This supervised tuning approach uses the dataset to effectively teach the model the specific behavior or task you need it to perform, by providing numerous illustrative examples.\n", - "\n", - "The size of your dataset will vary depending on the complexity of the task, but as a general rule, the more examples you include, the better the model's performance.\n", - "\n", - "Dataset Format\n", - "Your training data should be structured in a JSONL file and stored at a Google Cloud Storage (GCS) URI. Each line in the JSONL file must adhere to the following schema:\n", - "\n", - "A `contents` array containing objects that define:\n", - "- A `role` (\"user\" for user input or \"model\" for model output)\n", - "- `parts` containing the input data.\n", - "\n", - "```\n", - "{\n", - " \"contents\":[\n", - " {\n", - " \"role\":\"user\", # This indicate input content\n", - " \"parts\":[\n", - " {\n", - " \"text\":\"How are you?\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"role\":\"model\", # This indicate target content\n", - " \"parts\":[ # text only\n", - " {\n", - " \"text\":\"I am good, thank you!\"\n", - " }\n", - " ]\n", - " }\n", - " # ... repeat \"user\", \"model\" for multi turns.\n", - " ]\n", - "}\n", - "```\n", - "\n", - "Refer to the public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-prepare#about-datasets) for more details." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "4DqrQp4cLqRy" - }, - "outputs": [], - "source": [ - "# combine the systeminstruct + context + question into one column.\n", - "train_df = pd.read_csv('squad_train.csv')\n", - "validation_df = pd.read_csv('squad_validation.csv')\n", - "\n", - "train_df[\"input_question\"] = systemInstruct + \"Context: \" + train_df[\"context\"] + \"Question: \" + train_df[\"question\"]\n", - "validation_df[\"input_question\"] = systemInstruct + \"Context: \" + validation_df[\"context\"] + \"Question: \" + validation_df[\"question\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Pmzyz1migvHN" - }, - "outputs": [], - "source": [ - "def df_to_jsonl(df, output_file):\n", - " \"\"\"Converts a Pandas DataFrame to JSONL format and saves it to a file.\n", - "\n", - " Args:\n", - " df: The DataFrame to convert.\n", - " output_file: The name of the output file.\n", - " \"\"\"\n", - "\n", - " with open(output_file, 'w') as f:\n", - " for row in df.itertuples(index=False):\n", - " jsonl_obj = {\n", - " \"systemInstruction\": {\"parts\": [{\"text\": \"Answer the question based on the provided context.\"}]},\n", - " \"contents\": [\n", - " {\n", - " \"role\": \"user\",\n", - " \"parts\": [{\"text\": f\"Context: {row.context}\\n\\nQuestion: {row.question}\"}]\n", - " },\n", - " {\"role\": \"model\", \"parts\": [{\"text\": row.answers}]},\n", - " ]\n", - " }\n", - " f.write(json.dumps(jsonl_obj) + '\\n')\n", - "\n", - "# Process the DataFrames\n", - "df_to_jsonl(train_df, 'squad_train.jsonl')\n", - "df_to_jsonl(validation_df, 'squad_validation.jsonl')\n", - "\n", - "print(f\"JSONL data written to squad_train.jsonl\")\n", - "print(f\"JSONL data written to squad_validation.jsonl\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5OQv-ZMpJDhi" - }, - "source": [ - "Next you will copy the files into your Google Cloud bucket" - ] - }, - { - "cell_type": 
"code", - "execution_count": null, - "metadata": { - "id": "O5k1jYJ10IeW" - }, - "outputs": [], - "source": [ - "!gsutil cp ./squad_train.jsonl {BUCKET_URI}\n", - "!gsutil cp ./squad_validation.jsonl {BUCKET_URI}" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "UAHMYgFJJHjm" - }, - "source": [ - "### Start fine-tuning job\n", - "Next you can start the fine-tuning job.\n", - "\n", - "- `source_model`: Specifies the base Gemini model version you want to fine-tune.\n", - " - `train_dataset`: Path to your training data in JSONL format.\n", - "\n", - " *Optional parameters*\n", - " - `validation_dataset`: If provided, this data is used to evaluate the model during tuning.\n", - " - `tuned_model_display_name`: Display name for the tuned model.\n", - " - `epochs`: The number of training epochs to run.\n", - " - `learning_rate_multiplier`: A value to scale the learning rate during training.\n", - " - `adapter_size` : Gemini 1.5 Flash supports Adapter length [1, 4], default value is 4.\n", - "\n", - " **Important**: The default hyperparameter settings are optimized for optimal performance based on rigorous testing and are recommended for initial use. 
Users may customize these parameters to address specific performance requirements.**" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qj-LjQ5Vbf1E" - }, - "outputs": [], - "source": [ - "tuned_model_display_name = \"erwinh-fine-tuning-flash\" # @param {type:\"string\"}\n", - "\n", - "sft_tuning_job = sft.train(\n", - " source_model=base_model,\n", - " train_dataset=f\"\"\"{BUCKET_URI}/squad_train.jsonl\"\"\",\n", - " # # Optional:\n", - " validation_dataset=f\"\"\"{BUCKET_URI}/squad_validation.jsonl\"\"\",\n", - " tuned_model_display_name=tuned_model_display_name,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3tXawW1p8E5-" - }, - "outputs": [], - "source": [ - "# Get the tuning job info.\n", - "sft_tuning_job.to_dict()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "19aQnN-k84d9" - }, - "outputs": [], - "source": [ - "# Get the resource name of the tuning job\n", - "sft_tuning_job_name = sft_tuning_job.resource_name\n", - "sft_tuning_job_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "UKo8cwF2KVM5" - }, - "source": [ - "**Important:** Tuning time depends on several factors, such as training data size, number of epochs, learning rate multiplier, etc." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8NiZnPkIKcwm" - }, - "source": [ - "
\n", - "⚠️ It will take ~30 mins for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", - "
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Njag_3cB86rH" - }, - "outputs": [], - "source": [ - "%%time\n", - "# Wait for job completion\n", - "while not sft_tuning_job.refresh().has_ended:\n", - " time.sleep(60)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dkx92RBdbf27" - }, - "outputs": [], - "source": [ - "# tuned model name\n", - "tuned_model_name = sft_tuning_job.tuned_model_name\n", - "tuned_model_name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "e09aB_9Ebf5c" - }, - "outputs": [], - "source": [ - "# tuned model endpoint name\n", - "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", - "tuned_model_endpoint_name" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gV1ukBznKmlN" - }, - "source": [ - "#### Model tuning metrics\n", - "\n", - "- `/train_total_loss`: Loss for the tuning dataset at a training step.\n", - "- `/train_fraction_of_correct_next_step_preds`: The token accuracy at a training step. A single prediction consists of a sequence of tokens. This metric measures the accuracy of the predicted tokens when compared to the ground truth in the tuning dataset.\n", - "- `/train_num_predictions`: Number of predicted tokens at a training step\n", - "\n", - "#### Model evaluation metrics:\n", - "\n", - "- `/eval_total_loss`: Loss for the evaluation dataset at an evaluation step.\n", - "- `/eval_fraction_of_correct_next_step_preds`: The token accuracy at an evaluation step. A single prediction consists of a sequence of tokens. This metric measures the accuracy of the predicted tokens when compared to the ground truth in the evaluation dataset.\n", - "- `/eval_num_predictions`: Number of predicted tokens at an evaluation step.\n", - "\n", - "The metrics visualizations are available after the model tuning job completes. 
If you don't specify a validation dataset when you create the tuning job, only the visualizations for the tuning metrics are available." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "DH0guHM---Jo" - }, - "outputs": [], - "source": [ - "# Locate Vertex AI Experiment and Vertex AI Experiment Run\n", - "experiment = aiplatform.Experiment(experiment_name=experiment_name)\n", - "filter_str = metadata_utils._make_filter_string(\n", - " schema_title=\"system.ExperimentRun\",\n", - " parent_contexts=[experiment.resource_name],\n", - ")\n", - "experiment_run = context.Context.list(filter_str)[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "hggHQFIl_FXC" - }, - "outputs": [], - "source": [ - "# Read data from Tensorboard\n", - "tensorboard_run_name = f\"{experiment.get_backing_tensorboard_resource().resource_name}/experiments/{experiment.name}/runs/{experiment_run.name.replace(experiment.name, '')[1:]}\"\n", - "tensorboard_run = aiplatform.TensorboardRun(tensorboard_run_name)\n", - "metrics = tensorboard_run.read_time_series_data()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "BdHKZdqG_bHf" - }, - "outputs": [], - "source": [ - "def get_metrics(metric: str = \"/train_total_loss\"):\n", - " \"\"\"\n", - " Get metrics from Tensorboard.\n", - "\n", - " Args:\n", - " metric: metric name, eg. 
/train_total_loss or /eval_total_loss.\n", - " Returns:\n", - " steps: list of steps.\n", - " steps_loss: list of loss values.\n", - " \"\"\"\n", - " loss_values = metrics[metric].values\n", - " steps_loss = []\n", - " steps = []\n", - " for loss in loss_values:\n", - " steps_loss.append(loss.scalar.value)\n", - " steps.append(loss.step)\n", - " return steps, steps_loss" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_pDrlpA7_e9o" - }, - "outputs": [], - "source": [ - "# Get Train and Eval Loss\n", - "train_loss = get_metrics(metric=\"/train_total_loss\")\n", - "eval_loss = get_metrics(metric=\"/eval_total_loss\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "DL07j7u__iZx" - }, - "outputs": [], - "source": [ - "# Plot the train and eval loss metrics using Plotly python library\n", - "fig = make_subplots(\n", - " rows=1, cols=2, shared_xaxes=True, subplot_titles=(\"Train Loss\", \"Eval Loss\")\n", - ")\n", - "\n", - "# Add traces\n", - "fig.add_trace(\n", - " go.Scatter(x=train_loss[0], y=train_loss[1], name=\"Train Loss\", mode=\"lines\"),\n", - " row=1,\n", - " col=1,\n", - ")\n", - "fig.add_trace(\n", - " go.Scatter(x=eval_loss[0], y=eval_loss[1], name=\"Eval Loss\", mode=\"lines\"),\n", - " row=1,\n", - " col=2,\n", - ")\n", - "\n", - "# Add figure title\n", - "fig.update_layout(title=\"Train and Eval Loss\", xaxis_title=\"Steps\", yaxis_title=\"Loss\")\n", - "\n", - "# Set x-axis title\n", - "fig.update_xaxes(title_text=\"Steps\")\n", - "\n", - "# Set y-axes titles\n", - "fig.update_yaxes(title_text=\"Loss\")\n", - "\n", - "# Show plot\n", - "fig.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dYygz5ph_icf" - }, - "outputs": [], - "source": [ - "if True:\n", - " tuned_genai_model = GenerativeModel(tuned_model_endpoint_name)\n", - " # Test with the loaded model.\n", - " print(\"***Testing***\")\n", - " print(\n", - " 
tuned_genai_model.generate_content(\n", - " contents=prompt, generation_config=generation_config\n", - " )\n", - " )\n", - "else:\n", - " print(\"State:\", sft_tuning_job.state)\n", - " print(\"Error:\", sft_tuning_job.error)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "W4YMNGuoDajB" - }, - "outputs": [], - "source": [ - "y_true = test_df[\"answers\"].values\n", - "\n", - "def get_predictions(test_question):\n", - "\n", - " prompt = f\"\"\"{test_question}\"\"\"\n", - "\n", - " generation_config = GenerationConfig(\n", - " temperature=0.1,\n", - " )\n", - "\n", - " response = tuned_genai_model.generate_content(contents=prompt, generation_config=generation_config).text\n", - "\n", - " return response" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "69FMuAeoDrm5" - }, - "outputs": [], - "source": [ - "y_pred = []\n", - "y_pred_question = test_df[\"question\"].values\n", - "\n", - "for i in y_pred_question:\n", - "\n", - " prediction = get_predictions(i)\n", - " y_pred.append(prediction)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "yj76Tu6ODalZ" - }, - "outputs": [], - "source": [ - "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", - "print(f\"EM score: {em}\")\n", - "print(f\"F1 score: {f1}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Vkb2qXljFYqX" - }, - "source": [ - "\n", - "\n", - "\n" - ] - } - ], - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} From 890267868c04d2e1046d35c66f29d2e7c619b4f3 Mon Sep 17 00:00:00 2001 From: alan blount Date: Thu, 10 Oct 2024 08:51:49 -0600 Subject: [PATCH 57/76] docs: cleanup rag-grounding README (#1233) summaries of notebooks, slightly better structure to the file # Description Thank you for opening a Pull Request! 
Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> --- ...upervised_finetuning_using_gemini_qa.ipynb | 716 +++++++++--------- rag-grounding/README.md | 322 +++++--- 2 files changed, 574 insertions(+), 464 deletions(-) diff --git a/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb b/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb index 5456046578..266a1a24a2 100644 --- a/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb +++ b/gemini/tuning/supervised_finetuning_using_gemini_qa.ipynb @@ -25,6 +25,9 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "ojoyvz6mH1Hv" + }, "source": [ "# Supervised Fine Tuning with Gemini 1.5 Flash for Q&A\n", "\n", @@ -50,10 +53,7 @@ " \n", "
" - ], - "metadata": { - "id": "ojoyvz6mH1Hv" - } + ] }, { "cell_type": "markdown", @@ -99,6 +99,9 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "f7SS5pzuIA-1" + }, "source": [ "### Costs\n", "\n", @@ -114,10 +117,7 @@ "to generate a cost estimate based on your projected usage.\n", "\n", "To get an estimate of the number of tokens" - ], - "metadata": { - "id": "f7SS5pzuIA-1" - } + ] }, { "cell_type": "markdown", @@ -236,6 +236,7 @@ "source": [ "# Use the environment variable if the user doesn't provide Project ID.\n", "import os\n", + "\n", "import vertexai\n", "\n", "PROJECT_ID = \"\" # @param {type:\"string\", isTemplate: true}\n", @@ -264,33 +265,30 @@ }, "outputs": [], "source": [ + "import json\n", + "import time\n", + "\n", "# Vertex AI SDK\n", "from google.cloud import aiplatform\n", "from google.cloud.aiplatform.metadata import context\n", "from google.cloud.aiplatform.metadata import utils as metadata_utils\n", - "from vertexai.generative_models import (\n", - " GenerationConfig,\n", - " GenerativeModel,\n", - " HarmBlockThreshold,\n", - " HarmCategory,\n", - ")\n", - "from vertexai.preview.tuning import sft\n", - "\n", - "# Vertex AI SDK\n", - "from sklearn.metrics import f1_score\n", - "from sklearn.feature_extraction.text import TfidfVectorizer\n", - "import pandas as pd\n", - "import array\n", - "import time\n", - "from datasets import load_dataset\n", "import numpy as np\n", + "import pandas as pd\n", "import plotly.graph_objects as go\n", "from plotly.subplots import make_subplots\n", - "import json" + "from sklearn.feature_extraction.text import TfidfVectorizer\n", + "\n", + "# Vertex AI SDK\n", + "from sklearn.metrics import f1_score\n", + "from vertexai.generative_models import GenerationConfig, GenerativeModel\n", + "from vertexai.preview.tuning import sft" ] }, { "cell_type": "markdown", + "metadata": { + "id": "6bBZa2I-c-x8" + }, "source": [ "### Data\n", "\n", @@ -298,214 +296,222 @@ "Stanford Question Answering Dataset (SQuAD) is a 
reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\n", "\n", "You can fine more information on the SQuAD [github page](https://rajpurkar.github.io/SQuAD-explorer/)**bold text**" - ], - "metadata": { - "id": "6bBZa2I-c-x8" - } + ] }, { "cell_type": "markdown", - "source": [ - "First update the `BUCKET_NAME` parameter below. You can either use an existing bucket or create a new one." - ], "metadata": { "id": "KhebDJjRKePL" - } + }, + "source": [ + "First update the `BUCKET_NAME` parameter below. You can either use an existing bucket or create a new one." + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lit30Cktbfvo" + }, + "outputs": [], "source": [ "# Provide a bucket name\n", "BUCKET_NAME = \"\" # @param {type:\"string\"}\n", "BUCKET_URI = f\"gs://{BUCKET_NAME}\"\n", "print(BUCKET_URI)" - ], - "metadata": { - "id": "lit30Cktbfvo" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Only run the code below if you want to create a new Google Cloud Storage bucket." - ], "metadata": { "id": "ed-G-9cyKmPY" - } + }, + "source": [ + "Only run the code below if you want to create a new Google Cloud Storage bucket." + ] }, { "cell_type": "code", - "source": [ - "# ! gsutil mb -l {LOCATION} -p {PROJECT_ID} {BUCKET_URI}" - ], + "execution_count": null, "metadata": { "id": "0UJ8S9YFA1pZ" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# ! gsutil mb -l {LOCATION} -p {PROJECT_ID} {BUCKET_URI}" + ] }, { "cell_type": "markdown", - "source": [ - "Next you will copy the data into your bucket." - ], "metadata": { "id": "izjwF63tLLEq" - } + }, + "source": [ + "Next you will copy the data into your bucket." 
+ ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wjvcxx_sA3xP" + }, + "outputs": [], "source": [ "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_test.csv .\n", "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_train.csv .\n", "!gsutil cp gs://github-repo/generative-ai/gemini/tuning/qa/squad_validation.csv ." - ], - "metadata": { - "id": "wjvcxx_sA3xP" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "3F10LuZeL3kt" + }, "source": [ "### Baseline\n", "\n", "Next you will prepare some test data that you will use to establish a baseline. This means evaluating your chosen model on a representative sample of your dataset before any fine-tuning. A baseline allows you to quantify the improvements achieved through fine-tuning." - ], - "metadata": { - "id": "3F10LuZeL3kt" - } + ] }, { "cell_type": "code", - "source": [ - "test_df = pd.read_csv('squad_test.csv')\n", - "test_df.head(1)" - ], + "execution_count": null, "metadata": { "id": "LkOmXpegA8CW" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "test_df = pd.read_csv(\"squad_test.csv\")\n", + "test_df.head(1)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "mrLxcVVcMsNO" + }, "source": [ "You will need to do some dataset preperations. We will add a system instruction to the dataset:\n", "\n", "`SystemInstruct`: System instructions are a set of instructions that the model processes before it processes prompts. We recommend that you use system instructions to tell the model how you want it to behave and respond to prompts.\n", "\n", "We will also combine the `context` and `question`." 
- ], - "metadata": { - "id": "mrLxcVVcMsNO" - } + ] }, { "cell_type": "code", - "source": [ - "systemInstruct = \"Answer the question based on the context\"" - ], + "execution_count": null, "metadata": { "id": "c0pgJycOekZ3" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "systemInstruct = \"Answer the question based on the context\"" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "N_u3VzUMsyqj" + }, + "outputs": [], "source": [ "# combine the systeminstruct + context + question into one column.\n", "row_dataset = 6\n", "\n", - "test_df[\"input_question\"] = systemInstruct + \"\\n\" + \"Context: \" + test_df[\"context\"] + \"\\n\" + \"Question: \" + test_df[\"question\"]\n", + "test_df[\"input_question\"] = (\n", + " systemInstruct\n", + " + \"\\n\"\n", + " + \"Context: \"\n", + " + test_df[\"context\"]\n", + " + \"\\n\"\n", + " + \"Question: \"\n", + " + test_df[\"question\"]\n", + ")\n", "test_question = test_df[\"input_question\"].iloc[row_dataset]\n", "print(test_question)" - ], - "metadata": { - "id": "N_u3VzUMsyqj" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Next, set the model that you will use. In this example you will use `gemini-1.5-flash-002`. A multimodal model that is designed for high-volume, cost-effective applications, and which delivers speed and efficiency to build fast, lower-cost applications that don't compromise on quality.\n", - "\n" - ], "metadata": { "id": "FSxYYqMGWrmj" - } + }, + "source": [ + "Next, set the model that you will use. In this example you will use `gemini-1.5-flash-002`. 
A multimodal model that is designed for high-volume, cost-effective applications, and which delivers speed and efficiency to build fast, lower-cost applications that don't compromise on quality.\n" + ] }, { "cell_type": "code", - "source": [ - "base_model = \"gemini-1.5-flash-002\"\n", - "generation_model = GenerativeModel(base_model)" - ], + "execution_count": null, "metadata": { "id": "t-5X4goiqqBQ" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "base_model = \"gemini-1.5-flash-002\"\n", + "generation_model = GenerativeModel(base_model)" + ] }, { "cell_type": "markdown", - "source": [ - "Next lets take a question and get a prediction from Gemini that we can compare to the actual answer." - ], "metadata": { "id": "wyscyIenW4WZ" - } + }, + "source": [ + "Next lets take a question and get a prediction from Gemini that we can compare to the actual answer." + ] }, { "cell_type": "code", - "source": [ - "y_true = test_df[\"answers\"].values\n", - "y_pred_question = test_df[\"question\"].values" - ], + "execution_count": null, "metadata": { "id": "ejjhfynQWc0k" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "y_true = test_df[\"answers\"].values\n", + "y_pred_question = test_df[\"question\"].values" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cXencUYc6YAE" + }, + "outputs": [], "source": [ "def get_predictions(question: str) -> str:\n", - " \"\"\"Generates predictions for a given test question.\n", + " \"\"\"Generates predictions for a given test question.\n", "\n", - " Args:\n", - " test_question: The question to generate predictions for.\n", + " Args:\n", + " test_question: The question to generate predictions for.\n", "\n", - " Returns:\n", - " The generated prediction text.\n", - " \"\"\"\n", + " Returns:\n", + " The generated prediction text.\n", + " \"\"\"\n", "\n", - " prompt = f\"{question}\"\n", + " prompt = f\"{question}\"\n", "\n", - " generation_config = 
GenerationConfig(\n", - " temperature=0.1)\n", + " generation_config = GenerationConfig(temperature=0.1)\n", "\n", - " response = generation_model.generate_content(\n", - " contents=prompt, generation_config=generation_config\n", - " ).text\n", + " response = generation_model.generate_content(\n", + " contents=prompt, generation_config=generation_config\n", + " ).text\n", "\n", - " return response" - ], - "metadata": { - "id": "cXencUYc6YAE" - }, - "execution_count": null, - "outputs": [] + " return response" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gKa0wLooa3Is" + }, + "outputs": [], "source": [ "test_answer = test_df[\"answers\"].iloc[row_dataset]\n", "\n", @@ -513,28 +519,28 @@ "\n", "print(f\"Gemini response: {response}\")\n", "print(f\"Actual answer: {test_answer}\")" - ], - "metadata": { - "id": "gKa0wLooa3Is" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "OGRJTHKrdujw" + }, "source": [ "You can see that both answers are correct, but the response from Gemini is more lengthy. However, answers in the SQuAD dataset are typically concise and clear.\n", "\n", "Fine-tuning is a great way to control the type of output your use case requires. In this instance, you would want the model to provide short, clear answers.\n", "\n", "Next, let's check if each dataset has an equal number of examples." 
- ], - "metadata": { - "id": "OGRJTHKrdujw" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dCe0CUsi5E-Y" + }, + "outputs": [], "source": [ "y_pred = test_df[\"question\"].values\n", "\n", @@ -543,15 +549,13 @@ "\n", "num_strings_true = np.sum([isinstance(item, str) for item in y_true])\n", "print(f\"Number of strings in y_true: {num_strings_true}\")" - ], - "metadata": { - "id": "dCe0CUsi5E-Y" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "hvi7m8pKE8WB" + }, "source": [ "Next lest establish a baseline using evaluation metrics.\n", "\n", @@ -562,15 +566,17 @@ "F1 score is more flexible. It considers the overlap between the predicted answer and the true answer in terms of individual words or tokens. It calculates the harmonic mean of precision (proportion of correctly predicted words out of all predicted words) and recall (proportion of correctly predicted words out of all true answer words). This allows for partial credit and is less sensitive to minor wording differences.\n", "\n", "In practice, EM is useful when exact wording is crucial, while F1 is more suitable when evaluating the overall understanding and semantic accuracy of the QA system. Often, both metrics are used together to provide a comprehensive evaluation." 
- ], - "metadata": { - "id": "hvi7m8pKE8WB" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XcgEpTU55FFc" + }, + "outputs": [], "source": [ - "def calculate_em_and_f1_for_text_arrays(y_true, y_pred, average='weighted'):\n", + "def calculate_em_and_f1_for_text_arrays(y_true, y_pred, average=\"weighted\"):\n", " \"\"\"\n", " Calculates the Exact Match (EM) and F1 score for arrays of text\n", " using word-level comparisons.\n", @@ -599,29 +605,27 @@ "\n", " f1 = f1_score(y_true_class, y_pred_class, average=average)\n", "\n", - " return em, f1\n" - ], - "metadata": { - "id": "XcgEpTU55FFc" - }, - "execution_count": null, - "outputs": [] + " return em, f1" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rhDTq9p_GSBP" + }, + "outputs": [], "source": [ "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", "print(f\"EM score: {em}\")\n", "print(f\"F1 score: {f1}\")" - ], - "metadata": { - "id": "rhDTq9p_GSBP" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "22DfexbNfUHm" + }, "source": [ "### Prepare the data for fine-tuning\n", "\n", @@ -661,88 +665,109 @@ "```\n", "\n", "Refer to the public [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini-supervised-tuning-prepare#about-datasets) for more details." 
- ], - "metadata": { - "id": "22DfexbNfUHm" - } + ] }, { "cell_type": "code", - "source": [ - "# combine the systeminstruct + context + question into one column.\n", - "train_df = pd.read_csv('squad_train.csv')\n", - "validation_df = pd.read_csv('squad_validation.csv')\n", - "\n", - "train_df[\"input_question\"] = systemInstruct + \"Context: \" + train_df[\"context\"] + \"Question: \" + train_df[\"question\"]\n", - "validation_df[\"input_question\"] = systemInstruct + \"Context: \" + validation_df[\"context\"] + \"Question: \" + validation_df[\"question\"]" - ], + "execution_count": null, "metadata": { "id": "4DqrQp4cLqRy" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# combine the systeminstruct + context + question into one column.\n", + "train_df = pd.read_csv(\"squad_train.csv\")\n", + "validation_df = pd.read_csv(\"squad_validation.csv\")\n", + "\n", + "train_df[\"input_question\"] = (\n", + " systemInstruct\n", + " + \"Context: \"\n", + " + train_df[\"context\"]\n", + " + \"Question: \"\n", + " + train_df[\"question\"]\n", + ")\n", + "validation_df[\"input_question\"] = (\n", + " systemInstruct\n", + " + \"Context: \"\n", + " + validation_df[\"context\"]\n", + " + \"Question: \"\n", + " + validation_df[\"question\"]\n", + ")" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Pmzyz1migvHN" + }, + "outputs": [], "source": [ "def df_to_jsonl(df, output_file):\n", - " \"\"\"Converts a Pandas DataFrame to JSONL format and saves it to a file.\n", - "\n", - " Args:\n", - " df: The DataFrame to convert.\n", - " output_file: The name of the output file.\n", - " \"\"\"\n", - "\n", - " with open(output_file, 'w') as f:\n", - " for row in df.itertuples(index=False):\n", - " jsonl_obj = {\n", - " \"systemInstruction\": {\"parts\": [{\"text\": \"Answer the question based on the provided context.\"}]},\n", - " \"contents\": [\n", - " {\n", - " \"role\": \"user\",\n", - " \"parts\": [{\"text\": f\"Context: 
{row.context}\\n\\nQuestion: {row.question}\"}]\n", - " },\n", - " {\"role\": \"model\", \"parts\": [{\"text\": row.answers}]},\n", - " ]\n", - " }\n", - " f.write(json.dumps(jsonl_obj) + '\\n')\n", + " \"\"\"Converts a Pandas DataFrame to JSONL format and saves it to a file.\n", + "\n", + " Args:\n", + " df: The DataFrame to convert.\n", + " output_file: The name of the output file.\n", + " \"\"\"\n", + "\n", + " with open(output_file, \"w\") as f:\n", + " for row in df.itertuples(index=False):\n", + " jsonl_obj = {\n", + " \"systemInstruction\": {\n", + " \"parts\": [\n", + " {\"text\": \"Answer the question based on the provided context.\"}\n", + " ]\n", + " },\n", + " \"contents\": [\n", + " {\n", + " \"role\": \"user\",\n", + " \"parts\": [\n", + " {\n", + " \"text\": f\"Context: {row.context}\\n\\nQuestion: {row.question}\"\n", + " }\n", + " ],\n", + " },\n", + " {\"role\": \"model\", \"parts\": [{\"text\": row.answers}]},\n", + " ],\n", + " }\n", + " f.write(json.dumps(jsonl_obj) + \"\\n\")\n", + "\n", "\n", "# Process the DataFrames\n", - "df_to_jsonl(train_df, 'squad_train.jsonl')\n", - "df_to_jsonl(validation_df, 'squad_validation.jsonl')\n", + "df_to_jsonl(train_df, \"squad_train.jsonl\")\n", + "df_to_jsonl(validation_df, \"squad_validation.jsonl\")\n", "\n", "print(f\"JSONL data written to squad_train.jsonl\")\n", "print(f\"JSONL data written to squad_validation.jsonl\")" - ], - "metadata": { - "id": "Pmzyz1migvHN" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Next you will copy the files into your Google Cloud bucket" - ], "metadata": { "id": "5OQv-ZMpJDhi" - } + }, + "source": [ + "Next you will copy the files into your Google Cloud bucket" + ] }, { "cell_type": "code", - "source": [ - "!gsutil cp ./squad_train.jsonl {BUCKET_URI}\n", - "!gsutil cp ./squad_validation.jsonl {BUCKET_URI}" - ], + "execution_count": null, "metadata": { "id": "O5k1jYJ10IeW" }, - "execution_count": null, - "outputs": [] + 
"outputs": [], + "source": [ + "!gsutil cp ./squad_train.jsonl {BUCKET_URI}\n", + "!gsutil cp ./squad_validation.jsonl {BUCKET_URI}" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "UAHMYgFJJHjm" + }, "source": [ "### Start fine-tuning job\n", "Next you can start the fine-tuning job.\n", @@ -758,13 +783,15 @@ " - `adapter_size` : Gemini 1.5 Flash supports Adapter length [1, 4], default value is 4.\n", "\n", " **Important**: The default hyperparameter settings are optimized for optimal performance based on rigorous testing and are recommended for initial use. Users may customize these parameters to address specific performance requirements.**" - ], - "metadata": { - "id": "UAHMYgFJJHjm" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qj-LjQ5Vbf1E" + }, + "outputs": [], "source": [ "tuned_model_display_name = \"fine-tuning-gemini-flash-qa-v01\" # @param {type:\"string\"}\n", "\n", @@ -775,100 +802,98 @@ " validation_dataset=f\"\"\"{BUCKET_URI}/squad_validation.jsonl\"\"\",\n", " tuned_model_display_name=tuned_model_display_name,\n", ")" - ], - "metadata": { - "id": "qj-LjQ5Vbf1E" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "# Get the tuning job info.\n", - "sft_tuning_job.to_dict()" - ], + "execution_count": null, "metadata": { "id": "3tXawW1p8E5-" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# Get the tuning job info.\n", + "sft_tuning_job.to_dict()" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "19aQnN-k84d9" + }, + "outputs": [], "source": [ "# Get the resource name of the tuning job\n", "sft_tuning_job_name = sft_tuning_job.resource_name\n", "sft_tuning_job_name" - ], - "metadata": { - "id": "19aQnN-k84d9" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "**Important:** Tuning time depends on several factors, such as training data size, number of 
epochs, learning rate multiplier, etc." - ], "metadata": { "id": "UKo8cwF2KVM5" - } + }, + "source": [ + "**Important:** Tuning time depends on several factors, such as training data size, number of epochs, learning rate multiplier, etc." + ] }, { "cell_type": "markdown", + "metadata": { + "id": "8NiZnPkIKcwm" + }, "source": [ "
\n", "⚠️ It will take ~30 mins for the model tuning job to complete on the provided dataset and set configurations/hyperparameters. ⚠️\n", "
" - ], - "metadata": { - "id": "8NiZnPkIKcwm" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Njag_3cB86rH" + }, + "outputs": [], "source": [ "%%time\n", "# Wait for job completion\n", "while not sft_tuning_job.refresh().has_ended:\n", " time.sleep(60)" - ], - "metadata": { - "id": "Njag_3cB86rH" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dkx92RBdbf27" + }, + "outputs": [], "source": [ "# tuned model name\n", "tuned_model_name = sft_tuning_job.tuned_model_name\n", "tuned_model_name" - ], - "metadata": { - "id": "dkx92RBdbf27" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "e09aB_9Ebf5c" + }, + "outputs": [], "source": [ "# tuned model endpoint name\n", "tuned_model_endpoint_name = sft_tuning_job.tuned_model_endpoint_name\n", "tuned_model_endpoint_name" - ], - "metadata": { - "id": "e09aB_9Ebf5c" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "gV1ukBznKmlN" + }, "source": [ "#### Model tuning metrics\n", "\n", @@ -883,26 +908,28 @@ "- `/eval_num_predictions`: Number of predicted tokens at an evaluation step.\n", "\n", "The metrics visualizations are available after the model tuning job completes. If you don't specify a validation dataset when you create the tuning job, only the visualizations for the tuning metrics are available." 
- ], - "metadata": { - "id": "gV1ukBznKmlN" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "IHVU4XP2aOFE" + }, + "outputs": [], "source": [ "# Get resource name from tuning job.\n", "experiment_name = sft_tuning_job.experiment.resource_name\n", "experiment_name" - ], - "metadata": { - "id": "IHVU4XP2aOFE" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DH0guHM---Jo" + }, + "outputs": [], "source": [ "# Locate Vertex AI Experiment and Vertex AI Experiment Run\n", "experiment = aiplatform.Experiment(experiment_name=experiment_name)\n", @@ -911,29 +938,29 @@ " parent_contexts=[experiment.resource_name],\n", ")\n", "experiment_run = context.Context.list(filter_str)[0]" - ], - "metadata": { - "id": "DH0guHM---Jo" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hggHQFIl_FXC" + }, + "outputs": [], "source": [ "# Read data from Tensorboard\n", "tensorboard_run_name = f\"{experiment.get_backing_tensorboard_resource().resource_name}/experiments/{experiment.name}/runs/{experiment_run.name.replace(experiment.name, '')[1:]}\"\n", "tensorboard_run = aiplatform.TensorboardRun(tensorboard_run_name)\n", "metrics = tensorboard_run.read_time_series_data()" - ], - "metadata": { - "id": "hggHQFIl_FXC" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BdHKZdqG_bHf" + }, + "outputs": [], "source": [ "def get_metrics(metric: str = \"/train_total_loss\"):\n", " \"\"\"\n", @@ -952,28 +979,28 @@ " steps_loss.append(loss.scalar.value)\n", " steps.append(loss.step)\n", " return steps, steps_loss" - ], - "metadata": { - "id": "BdHKZdqG_bHf" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_pDrlpA7_e9o" + }, + "outputs": [], "source": [ "# 
Get Train and Eval Loss\n", "train_loss = get_metrics(metric=\"/train_total_loss\")\n", "eval_loss = get_metrics(metric=\"/eval_total_loss\")" - ], - "metadata": { - "id": "_pDrlpA7_e9o" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DL07j7u__iZx" + }, + "outputs": [], "source": [ "# Plot the train and eval loss metrics using Plotly python library\n", "fig = make_subplots(\n", @@ -1003,24 +1030,24 @@ "\n", "# Show plot\n", "fig.show()" - ], - "metadata": { - "id": "DL07j7u__iZx" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "### Use the fine-tuned model and evaluation" - ], "metadata": { "id": "pivmh4Lwbgy1" - } + }, + "source": [ + "### Use the fine-tuned model and evaluation" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qO6ln4teagw1" + }, + "outputs": [], "source": [ "prompt = \"\"\"\n", "Answer the question based on the context\n", @@ -1033,92 +1060,85 @@ "\n", "Question: How is lap provided by overlapping the admission side port?\n", "\"\"\"" - ], - "metadata": { - "id": "qO6ln4teagw1" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dYygz5ph_icf" + }, + "outputs": [], "source": [ "if True:\n", " tuned_genai_model = GenerativeModel(tuned_model_endpoint_name)\n", " # Test with the loaded model.\n", " print(\"***Testing***\")\n", - " print(\n", - " tuned_genai_model.generate_content(\n", - " contents=prompt\n", - " )\n", - " )\n", + " print(tuned_genai_model.generate_content(contents=prompt))\n", "else:\n", " print(\"State:\", sft_tuning_job.state)\n", " print(\"Error:\", sft_tuning_job.error)" - ], - "metadata": { - "id": "dYygz5ph_icf" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "W4YMNGuoDajB" + }, + "outputs": [], "source": [ 
"y_true = test_df[\"answers\"].values\n", "\n", - "def get_predictions(test_question):\n", "\n", - " prompt = f\"\"\"{test_question}\"\"\"\n", + "def get_predictions(test_question):\n", + " prompt = f\"\"\"{test_question}\"\"\"\n", "\n", - " generation_config = GenerationConfig(\n", - " temperature=0.1,\n", - " )\n", + " generation_config = GenerationConfig(\n", + " temperature=0.1,\n", + " )\n", "\n", - " response = tuned_genai_model.generate_content(contents=prompt, generation_config=generation_config).text\n", + " response = tuned_genai_model.generate_content(\n", + " contents=prompt, generation_config=generation_config\n", + " ).text\n", "\n", - " return response" - ], - "metadata": { - "id": "W4YMNGuoDajB" - }, - "execution_count": null, - "outputs": [] + " return response" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "69FMuAeoDrm5" + }, + "outputs": [], "source": [ "y_pred = []\n", "y_pred_question = test_df[\"question\"].values\n", "\n", "for i in y_pred_question:\n", - "\n", - " prediction = get_predictions(i)\n", - " y_pred.append(prediction)" - ], - "metadata": { - "id": "69FMuAeoDrm5" - }, - "execution_count": null, - "outputs": [] + " prediction = get_predictions(i)\n", + " y_pred.append(prediction)" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yj76Tu6ODalZ" + }, + "outputs": [], "source": [ "em, f1 = calculate_em_and_f1_for_text_arrays(y_pred, y_true)\n", "print(f\"EM score: {em}\")\n", "print(f\"F1 score: {f1}\")" - ], - "metadata": { - "id": "yj76Tu6ODalZ" - }, - "execution_count": null, - "outputs": [] + ] } ], "metadata": { "colab": { - "provenance": [] + "name": "supervised_finetuning_using_gemini_qa.ipynb", + "toc_visible": true }, "kernelspec": { "display_name": "Python 3", @@ -1127,4 +1147,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/rag-grounding/README.md b/rag-grounding/README.md index a9f0f5feec..5f5fdc17d4 100644 --- 
a/rag-grounding/README.md +++ b/rag-grounding/README.md @@ -1,22 +1,22 @@ # RAG and Grounding -This directory provides a curated list of notebooks that explore Retrieval +This directory provides a curated list of examples that explore Retrieval Augmented Generation (RAG), grounding techniques, knowledge bases, grounded generation, and related topics like vector search and semantic search. -All of these links are notebooks or other examples in this repository, but are -indexed here for your convenience. +All of these links are examples in this repository, but are indexed here for +your convenience. ## What is RAG and Grounding? ![Animated GIF showing "what is grounding"](./img/what-is-grounding.gif) -- Ungrounded generation relies on the LLM training data alone and is prone to - hallucinations when it doesn't have all the right facts -- **Grounding** a LLM with relevant facts provides fresh and potentially private - data to the model as part of it's input or prompt -- **RAG** is a technique which retrieves relevant facts, often via search, and - provides them to the LLM +- Ungrounded generation relies on the LLM training data alone and is prone to + hallucinations when it doesn't have all the right facts +- **Grounding** a LLM with relevant facts provides fresh and potentially + private data to the model as part of it's input or prompt +- **RAG** is a technique which retrieves relevant facts, often via search, and + provides them to the LLM Using RAG and Grounding to improve generations and reduce hallucinations is becoming commonplace. Doing so well and generating extremely high quality @@ -30,115 +30,204 @@ search engine and RAG application, and the evaluations needed to hill climb See [this blog post: How to evaluate generated answers from RAG at scale on Vertex AI](https://medium.com/google-cloud/vqa-3-how-to-evaluate-generated-answers-from-rag-at-scale-on-vertex-ai-70bc397cb33d) -for a walkthrough. 
- -- **[evaluate_rag_gen_ai_evaluation_service_sdk.ipynb](../gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb)**: - Evaluates RAG systems using the Gen AI Evaluation Service SDK. -- **[ragas_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/ragas_with_gemini.ipynb)**: - Use Case - using Ragas with Gemini for Eval. -- **[deepeval_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/deepeval_with_gemini.ipynb)**: - Use Case - using DeepEval with Gemini for Eval. +for a more in-depth walkthrough. + +- **[evaluate_rag_gen_ai_evaluation_service_sdk.ipynb](../gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb)**: + Evaluates RAG systems using the Gen AI Evaluation Service SDK, offering both + reference-free and reference-based evaluation methods with visualization. +- **[ragas_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/ragas_with_gemini.ipynb)**: + Evaluates RAG pipelines using the RAGAS framework and the Gemini Pro model + for Q&A tasks. +- **[deepeval_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/deepeval_with_gemini.ipynb)**: + Evaluates Gemini Pro's performance on a question-answering task using + DeepEval and the Vertex AI Gemini API, including Pytest integration. ## Out of the Box RAG/Grounding -- **[Vertex AI Search - sample Web App](../search/web-app/)**: Take a look at - this sample web app using Vertex AI Search, which is a flexible and easy to - use "out of the box" solution for search & RAG/Grounding. 
-- **[bulk_question_answering.ipynb](../search/bulk-question-answering/bulk_question_answering.ipynb)**: - Answers multiple questions using a search system -- **[contract_analysis.ipynb](../search/retrieval-augmented-generation/examples/contract_analysis.ipynb)**, - **[question_answering.ipynb](../search/retrieval-augmented-generation/examples/question_answering.ipynb)**, - **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: - Showcase specific RAG use cases -- **[search_data_blending_with_gemini_summarization.ipynb](../search/search_data_blending_with_gemini_summarization.ipynb)**: - Demonstrates calling a search app that blends information from multiple stores - (GCS, BQ, site) and summarizes search snippets and responses using the - Gemini Pro model. -- **[vertexai_search_options.ipynb](../search/vertexai-search-options/vertexai_search_options.ipynb)**: - Shows how to use Vertex AI Search in conjunction with the Gemini Pro model to - retrieve and summarize data across multiple data stores within Google Cloud - Platform (GCP). It highlights how the Gemini Pro model is able to formulate a - summary of user-specific prompts based on the retrieved snippets and citations - from Vertex AI Search. - -## Build your own RAG/Grounding - -We have several notebooks and examples for specific use cases or types of data -which may require a custom RAG and Grounding. We have many products which can be -used to build a RAG/Grounding pipeline of your own, or which you can add to an -existing RAG and Grounding solution. 
- -- [Vertex AI APIs for building search and RAG](https://cloud.google.com/generative-ai-app-builder/docs/builder-apis) - has a list of several APIs you can use in isolation or in combination -- [LlamaIndex on Vertex](https://cloud.google.com/vertex-ai/generative-ai/docs/rag-overview) - allows you to assemble a RAG search using popular OSS framework and components - from Google or Open Source -- [This end-to-end DIY RAG example in a notebook](https://github.com/GoogleCloudPlatform/applied-ai-engineering-samples/blob/main/genai-on-vertex-ai/retrieval_augmented_generation/diy_rag_with_vertexai_apis/build_grounded_rag_app_with_vertex.ipynb) - written in LangChain and using some of these APIs -- The Google Cloud Architecture Center has reference architectures on - [building a RAG infrastructure with GKE](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-gke) - or - [using alloydb and a few Vertex services](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-vertex-ai) - -### Search - -Vertex AI Search is an end-to-end Search engine which delivers high quality -grounded generation and RAG at scale, built-in. - -Vertex AI Vector Search is a extremely performant Vector Database which powers -Vertex AI Search. Other databases like AlloyDB and BigQuery also have vector -searches, each with different performance characteristics and retrieval -performance. +With +**[Vertex AI Search](https://cloud.google.com/generative-ai-app-builder/docs/)**, +you can build a RAG/Grounding system in a few clicks or a few lines of code and +be ready for scale with high quality results. Vertex AI Search is an end-to-end +Search engine builder, giving you Google quality search for your own data. + +- **[Vertex AI Search - sample Web App](../search/web-app/)**: Take a look at + this sample web app using Vertex AI Search, which is a flexible and easy to + use "out of the box" solution for search & RAG/Grounding. 
+- **[bulk_question_answering.ipynb](../search/bulk-question-answering/bulk_question_answering.ipynb)**: + Processes questions from a CSV and outputs the results (top documents and + extractive answers) to a TSV file using Vertex AI Search. +- **[contract_analysis.ipynb](../search/retrieval-augmented-generation/examples/contract_analysis.ipynb)**: + Demonstrates RAG for contract analysis using Palm2, LangChain, and a vector + store, with a Gradio interface for querying contracts and retrieving answers + with source references. +- **[question_answering.ipynb](../search/retrieval-augmented-generation/examples/question_answering.ipynb)**: + Builds a question-answering system using Vertex AI Search and LangChain to + retrieve information from unstructured documents and leverage LLMs for + answering with citations. +- **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: + Builds a question-answering system from Google Cloud documentation using RAG + and evaluates the impact of different parameter settings on model + performance. +- **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: + Showcase specific RAG use cases +- **[search_data_blending_with_gemini_summarization.ipynb](../search/search_data_blending_with_gemini_summarization.ipynb)**: + Demonstrates calling a search app that blends information from multiple + stores (GCS, BQ, site) and summarizes search snippets and responses using + the Gemini Pro model. +- **[vertexai_search_options.ipynb](../search/vertexai-search-options/vertexai_search_options.ipynb)**: + Demonstrates three approaches for using Vertex AI Search: direct API usage, + grounding with Gemini, and integration with LangChain. + +Vertex AI Search can be configured to adapt to many different use cases and +data. 
+ + + + +## Bring your own Search for RAG/Grounding + +The Vertex AI Search - Grounded Generation API allows you to use a custom search +engine for RAG/Grounding. You can wrap any search engine with a cloud function +exposing a REST API and then plug it into the Grounded Generation API as a +`grounding_source`. + +See the +[Grounded Generation API documentation](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen) +for more information. + +Demo coming soon. + +## Build your own Search for RAG/Grounding + +You may want to build your own search engine for RAG/Grounding, perhaps because +you have a unique use case or perhaps because you want to use a specific search +engine, or perhaps there are constraints on what you can use. + +We have many component APIs which can be used to build a RAG/Grounding pipeline +of your own. + +- [Vertex AI APIs for building search and RAG](https://cloud.google.com/generative-ai-app-builder/docs/builder-apis) + has a list of several APIs you can use in isolation or in combination + +We have a managed service to assemble component using LlamaIndex style SDK. + +- [LlamaIndex on Vertex](https://cloud.google.com/vertex-ai/generative-ai/docs/rag-overview) + allows you to assemble a RAG search using popular OSS framework and + components from Google or Open Source + +We have a few reference architectures you can use to build your own +RAG/Grounding pipeline from the ground up. 
+ +- [This end-to-end DIY RAG example in a notebook](https://github.com/GoogleCloudPlatform/applied-ai-engineering-samples/blob/main/genai-on-vertex-ai/retrieval_augmented_generation/diy_rag_with_vertexai_apis/build_grounded_rag_app_with_vertex.ipynb) + written in LangChain and using some of these APIs +- The Google Cloud Architecture Center has reference architectures on + [building a RAG infrastructure with GKE](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-gke) + or + [using alloydb and a few Vertex services](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-vertex-ai) + +More coming soon. + +## Build with a Vector Database + +Vertex AI Vector Search (Formerly known as Matching Engine) is a highly scalable +and performant vector database which powers Vertex AI Search. + +AlloyDB, BigQuery and Redis also have vector search capabilities, each with +different performance characteristics - though each of them is a general purpose +database and not purpose built for embeddings like Vector Search is. + +Note that you can use a Vector Database for RAG/Grounding and for many other use +cases, like recommendation systems, clustering, and anomaly detection. + +**[Document_QnA_using_gemini_and_vector_search.ipynb](../gemini/use-cases/retrieval-augmented-generation/Document_QnA_using_gemini_and_vector_search.ipynb)** +Demonstrates building a multimodal question-answering system using Gemini and +Vertex AI Vector Search for PDFs containing text and images, employing retrieval +augmented generation (RAG). ### Embeddings -- **[intro_Vertex_AI_embeddings.ipynb](../gemini/qa-ops/intro_Vertex_AI_embeddings.ipynb)**: - Introduces Vertex AI embeddings. -- **[hybrid-search.ipynb](../embeddings/hybrid-search.ipynb)**: Explores - combining different search techniques, potentially including vector search and - keyword-based search. 
-- **[intro-textemb-vectorsearch.ipynb](../embeddings/intro-textemb-vectorsearch.ipynb)**: - Introduces text embeddings and vector search. -- **[vector-search-quickstart.ipynb](../embeddings/vector-search-quickstart.ipynb)**: - Quick start guide for implementing vector search. -- **[bq-vector-search-log-outlier-detection.ipynb](../embeddings/use-cases/outlier-detection/bq-vector-search-log-outlier-detection.ipynb)**: - Demonstrates using vector search with BigQuery logs to identify outliers. +The best explanation of embeddings I've seen + +- **[intro_Vertex_AI_embeddings.ipynb](../gemini/qa-ops/intro_Vertex_AI_embeddings.ipynb)**: + Introduces Vertex AI's text and multimodal embeddings APIs and demonstrates + their use in building a simple e-commerce search application with text, + image, and video queries. +- **[hybrid-search.ipynb](../embeddings/hybrid-search.ipynb)**: Demonstrates + hybrid search (combining semantic and keyword search) using Vertex AI Vector + Search. +- **[intro-textemb-vectorsearch.ipynb](../embeddings/intro-textemb-vectorsearch.ipynb)**: + Demonstrates building semantic search capabilities using Vertex AI's text + embeddings and vector search, grounding LLM outputs with real-world data. +- **[vector-search-quickstart.ipynb](../embeddings/vector-search-quickstart.ipynb)**: + Provides a quickstart tutorial for Vertex AI Vector Search, guiding users + through setting up, building, deploying, and querying a vector search index + using sample product data. +- **[bq-vector-search-log-outlier-detection.ipynb](../embeddings/use-cases/outlier-detection/bq-vector-search-log-outlier-detection.ipynb)**: + Demonstrates log anomaly detection and investigation using Vertex AI, + BigQuery, and text embeddings to identify semantically similar past actions + for outlier analysis. ### Gemini -- **[intro-grounding-gemini.ipynb](../gemini/grounding/intro-grounding-gemini.ipynb)**: - Introduces grounding in the context of Gemini. 
-- **[building_DIY_multimodal_qa_system_with_mRAG.ipynb](../gemini/qa-ops/building_DIY_multimodal_qa_system_with_mRAG.ipynb)**: - Builds a custom multimodal question-answering system using mRAG. -- **[code_retrieval_augmented_generation.ipynb](../language/code/code_retrieval_augmented_generation.ipynb)**: - Demonstrates using code retrieval to improve code generation. -- **[intro-grounding.ipynb](../language/grounding/intro-grounding.ipynb)**: - Introduction to grounding in natural language processing -- **[langchain_bigquery_data_loader.ipynb](../language/orchestration/langchain/langchain_bigquery_data_loader.ipynb)**: - Uses LangChain to load data from BigQuery for RAG -- **[question_answering_documents.ipynb](../language/use-cases/document-qa/question_answering_documents.ipynb)**, - **[question_answering_documents_langchain.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain.ipynb)**, - **[question_answering_documents_langchain_matching_engine.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb)**: - Focus on question answering over documents -- **[summarization_large_documents.ipynb](../language/use-cases/document-summarization/summarization_large_documents.ipynb)**, - **[summarization_large_documents_langchain.ipynb](../language/use-cases/document-summarization/summarization_large_documents_langchain.ipynb)**: - Demonstrate summarizing large documents. -- **[llamaindex_workflows.ipynb](../gemini/orchestration/llamaindex_workflows.ipynb)** Using LlamaIndex Workflows to build an event driven RAG flow. +- **[intro-grounding-gemini.ipynb](../gemini/grounding/intro-grounding-gemini.ipynb)**: + Demonstrates grounding LLM responses in Google Search and Vertex AI Search + using Gemini, improving response accuracy and reducing hallucinations. 
+- **[intro-grounding.ipynb](../language/grounding/intro-grounding.ipynb)**: + Demonstrates using Vertex AI's grounding feature to improve LLM response + accuracy and relevance by grounding them in Google Search or custom Vertex + AI Search data stores. +- **[building_DIY_multimodal_qa_system_with_mRAG.ipynb](../gemini/qa-ops/building_DIY_multimodal_qa_system_with_mRAG.ipynb)**: + Builds a custom multimodal question-answering system using mRAG. +- **[code_retrieval_augmented_generation.ipynb](../language/code/code_retrieval_augmented_generation.ipynb)**: + Demonstrates RAG for code using Gemini, LangChain, FAISS, and Vertex AI's + Embeddings API to enhance code generation by incorporating external + knowledge from the Google Cloud Generative AI GitHub repository. +- **[langchain_bigquery_data_loader.ipynb](../language/orchestration/langchain/langchain_bigquery_data_loader.ipynb)**: + Demonstrates using LangChain's BigQuery Data Loader to query BigQuery data, + integrate it with a Vertex AI LLM, and build a chain to generate and execute + SQL queries for targeted customer analysis. +- **[question_answering_documents.ipynb](../language/use-cases/document-qa/question_answering_documents.ipynb)**: + Demonstrates three methods (stuffing, map-reduce, and map-reduce with + embeddings) for building a question-answering system using the Vertex AI + PaLM API to efficiently handle large document datasets. +- **[question_answering_documents_langchain.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain.ipynb)**: + Demonstrates building a question-answering system using LangChain and Vertex + AI's PaLM API, comparing different methods (stuffing, map-reduce, refine) + for handling large documents, and showcasing the improved efficiency of + using similarity search with embeddings. 
+- **[question_answering_documents_langchain_matching_engine.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb)**: + Demonstrates a question-answering system using LangChain, Vertex AI's PaLM + API, and Matching Engine for retrieval-augmented generation, enabling + fact-grounded responses with source citations. +- **[summarization_large_documents.ipynb](../language/use-cases/document-summarization/summarization_large_documents.ipynb)**: + Demonstrates four methods (stuffing, MapReduce, MapReduce with overlapping + chunks, and MapReduce with rolling summaries) for summarizing large + documents using Vertex AI's generative models, addressing challenges of + exceeding context length limits. +- **[summarization_large_documents_langchain.ipynb](../language/use-cases/document-summarization/summarization_large_documents_langchain.ipynb)**: + Demonstrates three LangChain methods (Stuffing, MapReduce, Refine) for + summarizing large documents using Vertex AI models, comparing their + effectiveness and limitations. +- **[llamaindex_workflows.ipynb](../gemini/orchestration/llamaindex_workflows.ipynb)** Using LlamaIndex Workflows to build an event driven RAG flow. ### Open Models -- **[cloud_run_ollama_gemma2_rag_qa.ipynb](../open-models/serving/cloud_run_ollama_gemma2_rag_qa.ipynb)**: - Sets up a RAG-based question-answering system using Ollama and Gemma2 on Cloud - Run +- **[cloud_run_ollama_gemma2_rag_qa.ipynb](../open-models/serving/cloud_run_ollama_gemma2_rag_qa.ipynb)**: + Demonstrates deploying Gemma 2 on Google Cloud Run with GPU acceleration + using Ollama and LangChain, building a RAG question-answering application. 
## Agents on top of RAG -- **[tutorial_vertex_ai_search_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_vertex_ai_search_rag_agent.ipynb)**: - Tutorial for building RAG agents using Vertex AI Search -- **[tutorial_alloydb_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb)**: - Tutorial for building RAG agents using AlloyDB -- **[tutorial_cloud_sql_pg_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb)**: - Tutorial for building RAG agents using Cloud SQL (PostgreSQL) +- **[tutorial_vertex_ai_search_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_vertex_ai_search_rag_agent.ipynb)**: + Demonstrates building and deploying a conversational search agent on Vertex + AI using LangChain, a reasoning engine, and RAG with Vertex AI Search to + query a movie dataset. +- **[tutorial_alloydb_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb)**: + Demonstrates deploying a RAG application using LangChain, AlloyDB for + PostgreSQL, and Vertex AI, covering setup, deployment, and cleanup. +- **[tutorial_cloud_sql_pg_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb)**: + Demonstrates deploying a RAG application using LangChain, Vertex AI, and + Cloud SQL for PostgreSQL, enabling semantic search and LLM-based responses. ## Use Cases @@ -147,16 +236,17 @@ grounding techniques in various applications. Feel free to dive into the notebooks that pique your interest and start building your own RAG-powered solutions. 
-- Examples of RAG in different domains - - **[Document_QnA_using_gemini_and_vector_search.ipynb](../gemini/use-cases/retrieval-augmented-generation/Document_QnA_using_gemini_and_vector_search.ipynb)** - - **[NLP2SQL_using_dynamic_RAG.ipynb](../gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb)** - - **[RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb](../gemini/use-cases/retrieval-augmented-generation/RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb)** - - **[code_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/code_rag.ipynb)** - - **[intra_knowledge_qna.ipynb](../gemini/use-cases/retrieval-augmented-generation/intra_knowledge_qna.ipynb)** - - **[intro_multimodal_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/intro_multimodal_rag.ipynb)** - - **[llamaindex_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/llamaindex_rag.ipynb)** - - **[multimodal_rag_langchain.ipynb](../gemini/use-cases/retrieval-augmented-generation/multimodal_rag_langchain.ipynb)** - - **[small_to_big_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb)** -- Build RAG systems using BigQuery - - **[rag_qna_with_bq_and_featurestore.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_qna_with_bq_and_featurestore.ipynb)** - - **[rag_vector_embedding_in_bigquery.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_vector_embedding_in_bigquery.ipynb)** +- Examples of RAG in different domains + + - **[NLP2SQL_using_dynamic_RAG.ipynb](../gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb)** + - **[RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb](../gemini/use-cases/retrieval-augmented-generation/RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb)** + - **[code_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/code_rag.ipynb)** + - 
**[intra_knowledge_qna.ipynb](../gemini/use-cases/retrieval-augmented-generation/intra_knowledge_qna.ipynb)** + - **[intro_multimodal_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/intro_multimodal_rag.ipynb)** + - **[llamaindex_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/llamaindex_rag.ipynb)** + - **[multimodal_rag_langchain.ipynb](../gemini/use-cases/retrieval-augmented-generation/multimodal_rag_langchain.ipynb)** + - **[small_to_big_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb)** + +- Build RAG systems using BigQuery + - **[rag_qna_with_bq_and_featurestore.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_qna_with_bq_and_featurestore.ipynb)** + - **[rag_vector_embedding_in_bigquery.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_vector_embedding_in_bigquery.ipynb)** From 237c22d11fbc8d400322a24bed132e3a756d2e7a Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Thu, 10 Oct 2024 10:46:41 -0500 Subject: [PATCH 58/76] feat: Add Intro to Knowledge Engine Notebook (#1235) # Description Introduction to Vertex AI Knowledge Engine (Formerly RAG API and LlamaIndex on Vertex AI) --------- Co-authored-by: Owl Bot --- gemini/README.md | 58 +- .../intro_knowledge_engine.ipynb | 574 ++++++++++++++++++ 2 files changed, 597 insertions(+), 35 deletions(-) create mode 100644 gemini/knowledge-engine/intro_knowledge_engine.ipynb diff --git a/gemini/README.md b/gemini/README.md index c52233949a..8eb7f71f3b 100644 --- a/gemini/README.md +++ b/gemini/README.md @@ -26,7 +26,6 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for
DescriptionContents
@@ -36,11 +35,10 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for Get started with the Vertex AI Gemini API:
    -
  • gemini-pro model
  • -
  • gemini-pro-vision model
  • +
  • gemini-1.5-pro model
  • +
  • gemini-1.5-flash model
Starter notebooks
@@ -49,7 +47,6 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for sample-apps/ Discover sample apps using GeminiSample apps
@@ -57,10 +54,7 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for
use-cases/
- Explore industry use-cases enabled by Gemini (e.g. retail, education) - Sample use casesExplore industry use-cases enabled by Gemini (e.g. retail, education)
@@ -69,7 +63,6 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for evaluation/ Learn how to evaluate Gemini with Vertex AI Model Evaluation for Generative AISample notebooks
@@ -77,10 +70,7 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for
function-calling/
- Learn how to use the function calling feature of Gemini - Sample notebooksLearn how to use the function calling feature of Gemini
@@ -88,48 +78,47 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for
grounding/
- Learn how to use the grounding feature of Gemini - Sample notebooksLearn how to use the grounding feature of Gemini
- health_and_safety + neurology
- responsible-ai/ + knowledge-engine/
Learn how to use safety ratings and thresholds with the Vertex AI Gemini API.Sample notebooksDiscover how to utilize the Knowledge Engine feature of Vertex AI
- build + media_link
- reasoning-engine/ + prompts/
Learn how to create and use effective prompts with Gemini.
- Discover how to utilize the reasoning engine capabilities in Gemini + question_answer +
+ qa-ops/
Sample notebooksLearn about the question-answer operations available in Gemini
- media_link + build
- prompts/ + reasoning-engine/
Learn how to create and use effective prompts with Gemini.Sample notebooksDiscover how to utilize the Reasoning Engine feature of Vertex AI
- question_answer + health_and_safety
- qa-ops/ + responsible-ai/
Learn about the question-answer operations available in GeminiSample notebooksLearn best practices for responsible AI and security with the Vertex AI Gemini API.
@@ -138,7 +127,6 @@ The notebooks and samples in this folder focus on using the **Vertex AI SDK for tuning/ Learn how to tune and customize the Gemini models for specific use-cases.Sample notebooks
diff --git a/gemini/knowledge-engine/intro_knowledge_engine.ipynb b/gemini/knowledge-engine/intro_knowledge_engine.ipynb new file mode 100644 index 0000000000..43c399ce2c --- /dev/null +++ b/gemini/knowledge-engine/intro_knowledge_engine.ipynb @@ -0,0 +1,574 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Intro to Building a Scalable and Modular RAG System with Knowledge Engine in Vertex AI (LlamaIndex on Vertex AI)\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \"Google
Open in Colab\n", + "
\n", + "
\n", + " \n", + " \"Google
Open in Colab Enterprise\n", + "
\n", + "
\n", + " \n", + " \"Vertex
Open in Vertex AI Workbench\n", + "
\n", + "
\n", + " \n", + " \"GitHub
View on GitHub\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "| Author(s) | [Holt Skinner](https://github.com/holtskinner) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## Overview\n", + "\n", + "[LlamaIndex](https://www.llamaindex.ai/) is a data framework for developing\n", + "context-augmented large language model (LLM) applications. Context augmentation\n", + "occurs when you apply an LLM to your data. This implements retrieval-augmented\n", + "generation (RAG).\n", + "\n", + "A common problem with LLMs is that they don't understand private knowledge, that\n", + "is, your organization's data. With Knowledge Engine, you can enrich the\n", + "LLM context with additional private information, because the model can reduce\n", + "hallucination and answer questions more accurately.\n", + "\n", + "By combining additional knowledge sources with the existing knowledge that LLMs\n", + "have, a better context is provided. The improved context along with the query\n", + "enhances the quality of the LLM's response.\n", + "\n", + "The following concepts are key to understanding LlamaIndex on\n", + "Vertex AI. These concepts are listed in the order of the\n", + "retrieval-augmented generation (RAG) process.\n", + "\n", + "1. **Data ingestion**: Intake data from different data sources. For example,\n", + " local files, Google Cloud Storage, and Google Drive.\n", + "\n", + "1. **Data transformation**: Conversion of the data in preparation for indexing. For example, data is split into chunks.\n", + "\n", + "1. **Embedding**: Numerical representations of words or pieces of text. These numbers capture the\n", + " semantic meaning and context of the text. Similar or related words or text\n", + " tend to have similar embeddings, which means they are closer together in the\n", + " high-dimensional vector space.\n", + "\n", + "2. 
**Data indexing**: Knowledge Engine creates an index called a corpus.\n", + " The index structures the knowledge base so it's optimized for searching. For\n", + " example, the index is like a detailed table of contents for a massive\n", + " reference book.\n", + "\n", + "3. **Retrieval**: When a user asks a question or provides a prompt, the retrieval\n", + " component in Knowledge Engine searches through its knowledge\n", + " base to find information that is relevant to the query.\n", + "\n", + "1. **Generation**: The retrieved information becomes the context added to the\n", + " original user query as a guide for the generative AI model to generate\n", + " factually grounded and relevant responses.\n", + "\n", + "For more information, refer to the public documentation for [Vertex AI Knowledge Engine](https://cloud.google.com/vertex-ai/generative-ai/docs/llamaindex-on-vertexai)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "%pip install --upgrade --user --quiet google-cloud-aiplatform" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
\n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "# Use the environment variable if the user doesn't provide Project ID.\n", + "import os\n", + "\n", + "import vertexai\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "from vertexai.preview import rag\n", + "from vertexai.preview.generative_models import GenerativeModel, Tool" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e43229f3ad4f" + }, + "source": [ + "### Create a RAG Corpus" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cf93d5f0ce00" + }, + "outputs": [], + "source": [ + "# Currently supports Google first-party embedding models\n", + "EMBEDDING_MODEL = \"publishers/google/models/text-embedding-004\" # @param {type:\"string\", isTemplate: true}\n", + "embedding_model_config = rag.EmbeddingModelConfig(publisher_model=EMBEDDING_MODEL)\n", + "\n", + "rag_corpus = rag.create_corpus(\n", + " display_name=\"my-rag-corpus\", embedding_model_config=embedding_model_config\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "197c585b61b2" + }, + "source": [ + "### Check the corpus just created" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f229b13dc617" + }, + "outputs": [], + "source": [ + "rag.list_corpora()" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "id": "c52924cc1440" + }, + "source": [ + "### Upload a local file to the corpus" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4976ffe8564f" + }, + "outputs": [], + "source": [ + "%%writefile test.txt\n", + "\n", + "Here's a demo using Knowledge Engine on Vertex AI." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "529390917c29" + }, + "outputs": [], + "source": [ + "rag_file = rag.upload_file(\n", + " corpus_name=rag_corpus.name,\n", + " path=\"test.txt\",\n", + " display_name=\"test.txt\",\n", + " description=\"my test file\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5269a0c2786d" + }, + "source": [ + "### Import files from Google Cloud Storage\n", + "\n", + "Remember to grant \"Viewer\" access to the \"Vertex RAG Data Service Agent\" (with the format of `service-{project_number}@gcp-sa-vertex-rag.iam.gserviceaccount.com`) for your Google Cloud Storage bucket.\n", + "\n", + "For this example, we'll use a public GCS bucket containing earning reports from Alphabet." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5910ae450f69" + }, + "outputs": [], + "source": [ + "INPUT_GCS_BUCKET = (\n", + " \"gs://cloud-samples-data/gen-app-builder/search/alphabet-investor-pdfs/\"\n", + ")\n", + "\n", + "response = rag.import_files(\n", + " corpus_name=rag_corpus.corpus_name,\n", + " paths=[INPUT_GCS_BUCKET],\n", + " chunk_size=1024, # Optional\n", + " chunk_overlap=100, # Optional\n", + " max_embedding_requests_per_min=900, # Optional\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "60a84095746d" + }, + "source": [ + "### Import files from Google Drive\n", + "\n", + "Eligible paths can be formatted as:\n", + "\n", + "- `https://drive.google.com/drive/folders/{folder_id}`\n", + "- `https://drive.google.com/file/d/{file_id}`.\n", + "\n", + "Remember to grant \"Viewer\" access to the \"Vertex RAG Data Service Agent\" (with the format of `service-{project_number}@gcp-sa-vertex-rag.iam.gserviceaccount.com`) for your Drive folder/files.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0a90c125874c" + }, + "outputs": [], + "source": [ + "response = rag.import_files(\n", + " corpus_name=rag_corpus.name,\n", + " paths=[\"https://drive.google.com/drive/folders/{folder_id}\"],\n", + " chunk_size=512,\n", + " chunk_overlap=50,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f700b3e23121" + }, + "source": [ + "### Optional: Perform direct context retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4669c5cdbb5a" + }, + "outputs": [], + "source": [ + "# Direct context retrieval\n", + "response = rag.retrieval_query(\n", + " rag_resources=[\n", + " rag.RagResource(\n", + " rag_corpus=rag_corpus.name,\n", + " # Optional: supply IDs from `rag.list_files()`.\n", + " # rag_file_ids=[\"rag-file-1\", \"rag-file-2\", ...],\n", + " )\n", + " ],\n", + " text=\"What is RAG and why it is 
helpful?\",\n", + " similarity_top_k=10, # Optional\n", + " vector_distance_threshold=0.5, # Optional\n", + ")\n", + "print(response)\n", + "\n", + "# Optional: The retrieved context can be passed to any SDK or model generation API to generate final results.\n", + "# context = \" \".join([context.text for context in response.contexts.contexts]).replace(\"\\n\", \"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "79ea89661842" + }, + "source": [ + "### Create RAG Retrieval Tool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0ebceac3d816" + }, + "outputs": [], + "source": [ + "# Create a tool for the RAG Corpus\n", + "rag_retrieval_tool = Tool.from_retrieval(\n", + " retrieval=rag.Retrieval(\n", + " source=rag.VertexRagStore(\n", + " rag_corpora=[rag_corpus.name],\n", + " similarity_top_k=10,\n", + " vector_distance_threshold=0.5,\n", + " ),\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d88fa7ede853" + }, + "source": [ + "### Generate Content with Gemini using Rag Retrieval Tool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8dd928baecd4" + }, + "outputs": [], + "source": [ + "# Load tool into Gemini model\n", + "rag_gemini_model = GenerativeModel(\n", + " \"gemini-1.5-flash-001\", # your self-deployed endpoint\n", + " tools=[rag_retrieval_tool],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "124b36be8d5b" + }, + "outputs": [], + "source": [ + "response = rag_gemini_model.generate_content(\"What is RAG?\")\n", + "\n", + "display(Markdown(response.text))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0268fe43d41c" + }, + "source": [ + "### Generate Content with Llama3 using Rag Retrieval Tool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f6e67ee7968c" + }, + "outputs": [], + "source": [ + "# Load tool into Llama 
model\n", + "llama_gemini_model = GenerativeModel(\n", + " # your self-deployed endpoint for Llama3\n", + " \"projects/{project}/locations/{location}/endpoints/{endpoint_resource_id}\",\n", + " tools=[rag_retrieval_tool],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c6d710b6dece" + }, + "outputs": [], + "source": [ + "response = rag_gemini_model.generate_content(\"What is RAG?\")\n", + "\n", + "display(Markdown(response.text))" + ] + } + ], + "metadata": { + "colab": { + "name": "intro_knowledge_engine.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 492444907c4da4efc0a8041357cc8910ad6805e6 Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:07:59 -0500 Subject: [PATCH 59/76] docs: Update Directory table in README.md (#1234) Remove third column (Same for most folders) and shorten description of `language/` directory. --- README.md | 38 +++++++------------------------------- 1 file changed, 7 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index a65cc0ea98..4072c0c1ef 100644 --- a/README.md +++ b/README.md @@ -15,13 +15,11 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor [![Applied AI Summit: The cloud toolkit for generative AI](https://img.youtube.com/vi/xT7WW2SKLfE/hqdefault.jpg)](https://www.youtube.com/watch?v=xT7WW2SKLfE) - - - + - - - - - @@ -99,36 +79,32 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor
  • Visual question answering
  • - - - -
    DescriptionContents
    Gemini
    @@ -30,7 +28,6 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor
    Discover Gemini through starter notebooks, use cases, function calling, sample apps, and more. Sample notebooks, apps, use cases
    @@ -39,16 +36,14 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor search/ Use this folder if you're interested in using Vertex AI Search, a Google-managed solution to help you rapidly build search engines for websites and across enterprise data. (Formerly known as Enterprise Search on Generative AI App Builder)Sample apps, use cases
    - RAG Grounding + RAG Grounding
    rag-grounding/
    Use this folder for information on Retrieval Augmented Generation (RAG) and Grounding with Vertex AI. This is an index of notebooks and samples across other directories focused on this topic.Sample apps, use cases
    @@ -57,35 +52,20 @@ For more Vertex AI samples, please visit the [Vertex AI samples GitHub repositor conversation/ Use this folder if you're interested in using Vertex AI Conversation, a Google-managed solution to help you rapidly build chat bots for websites and across enterprise data. (Formerly known as Chat Apps on Generative AI App Builder)Sample apps, use cases
    - Language + Language
    language/
    Use this folder if you're interested in building your own solutions from scratch using Google's language foundation models (Vertex AI PaLM API). - Sample notebooks, apps, use cases
    - Vision + Vision
    vision/
    Sample notebooks, apps, use cases
    - Speech + Speech
    audio/
    Use this folder if you're interested in building your own solutions from scratch using features from Chirp, a version of Google's Universal Speech Model (USM) on Vertex AI (Vertex AI Chirp API). Sample notebooks, apps, use cases
    - Setup Env + Setup Env
    setup-env/
    Instructions on how to set up Google Cloud, the Vertex AI Python SDK, and notebook environments on Google Colab and Vertex AI Workbench.Setup instructions
    - Resources + Resources
    RESOURCES.md
    Learning resources (e.g. blogs, YouTube playlists) about Generative AI on Google CloudResources (e.g. videos, blog posts, learning paths)
    From 3f0974fed57315c6123864fa2334609f3332029a Mon Sep 17 00:00:00 2001 From: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:27:47 -0500 Subject: [PATCH 60/76] ci: Update Black/isort versions for nox and add exception for README in textlintrc (#1236) Co-authored-by: Owl Bot --- .github/linters/.textlintrc | 10 +++++++ .../ui/\360\237\217\240 Home.py" | 4 ++- .../NLP2SQL_using_dynamic_RAG.ipynb | 4 ++- .../utils/intro_multimodal_rag_utils.py | 28 +++++++++---------- ...nslation_training_data_tsv_generator.ipynb | 6 ++-- noxfile.py | 4 +-- 6 files changed, 35 insertions(+), 21 deletions(-) create mode 100644 .github/linters/.textlintrc diff --git a/.github/linters/.textlintrc b/.github/linters/.textlintrc new file mode 100644 index 0000000000..50bc05a4b1 --- /dev/null +++ b/.github/linters/.textlintrc @@ -0,0 +1,10 @@ +{ + "rules": { + "terminology": { + "defaultTerms": true, + "exclude": [ + "README" + ] + } + } +} diff --git "a/gemini/sample-apps/llamaindex-rag/ui/\360\237\217\240 Home.py" "b/gemini/sample-apps/llamaindex-rag/ui/\360\237\217\240 Home.py" index c02bf0c78e..a1e5c07256 100644 --- "a/gemini/sample-apps/llamaindex-rag/ui/\360\237\217\240 Home.py" +++ "b/gemini/sample-apps/llamaindex-rag/ui/\360\237\217\240 Home.py" @@ -1,7 +1,9 @@ import streamlit as st # Set up Streamlit page configuration -st.set_page_config(layout="wide", page_title="LlamaIndex RAG Evaluation", page_icon="🏠") +st.set_page_config( + layout="wide", page_title="LlamaIndex RAG Evaluation", page_icon="🏠" +) # Custom CSS for styling diff --git a/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb b/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb index b633815e87..0efe670b24 100644 --- a/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb +++ b/gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb @@ -1196,7 +1196,9 @@ } ], "source": [ 
- "text_query = \"展示Foreign Currency Transactions 2023年10月的信息\" # @param {type:\"string\"}\n", + "text_query = (\n", + " \"展示Foreign Currency Transactions 2023年10月的信息\" # @param {type:\"string\"}\n", + ")\n", "find_similar_questions(df, text_query)" ] }, diff --git a/gemini/use-cases/retrieval-augmented-generation/utils/intro_multimodal_rag_utils.py b/gemini/use-cases/retrieval-augmented-generation/utils/intro_multimodal_rag_utils.py index 42d99dff21..df5873dc29 100644 --- a/gemini/use-cases/retrieval-augmented-generation/utils/intro_multimodal_rag_utils.py +++ b/gemini/use-cases/retrieval-augmented-generation/utils/intro_multimodal_rag_utils.py @@ -153,14 +153,14 @@ def get_page_text_embedding(text_data: dict | str) -> dict: if isinstance(text_data, dict): # Process each chunk for chunk_number, chunk_value in text_data.items(): - embeddings_dict[ - chunk_number - ] = get_text_embedding_from_text_embedding_model(text=chunk_value) + embeddings_dict[chunk_number] = ( + get_text_embedding_from_text_embedding_model(text=chunk_value) + ) else: # Process the first 1000 characters of the page text - embeddings_dict[ - "text_embedding" - ] = get_text_embedding_from_text_embedding_model(text=text_data) + embeddings_dict["text_embedding"] = ( + get_text_embedding_from_text_embedding_model(text=text_data) + ) return embeddings_dict @@ -263,10 +263,10 @@ def get_gemini_response( generative_multimodal_model, model_input: list[str], stream: bool = True, - generation_config: GenerationConfig - | None = GenerationConfig(temperature=0.2, max_output_tokens=2048), - safety_settings: dict - | None = { + generation_config: GenerationConfig | None = GenerationConfig( + temperature=0.2, max_output_tokens=2048 + ), + safety_settings: dict | None = { HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE, HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE, HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE, @@ -393,10 +393,10 @@ def 
get_document_metadata( image_save_dir: str, image_description_prompt: str, embedding_size: int = 128, - generation_config: GenerationConfig - | None = GenerationConfig(temperature=0.2, max_output_tokens=2048), - safety_settings: dict - | None = { + generation_config: GenerationConfig | None = GenerationConfig( + temperature=0.2, max_output_tokens=2048 + ), + safety_settings: dict | None = { HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE, HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE, HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE, diff --git a/language/translation/translation_training_data_tsv_generator.ipynb b/language/translation/translation_training_data_tsv_generator.ipynb index edd05dab7f..4564732913 100644 --- a/language/translation/translation_training_data_tsv_generator.ipynb +++ b/language/translation/translation_training_data_tsv_generator.ipynb @@ -435,9 +435,9 @@ " \"Length of a pair detected to be greater than 200 words.\"\n", " )\n", " print(\"this pair will be skipped\")\n", - " more_than_200_words[\n", - " \" \".join(src_row_data)\n", - " ] = \" \".join(ref_row_data)\n", + " more_than_200_words[\" \".join(src_row_data)] = (\n", + " \" \".join(ref_row_data)\n", + " )\n", " else:\n", " tsv_f.write(\n", " \" \".join(src_row_data)\n", diff --git a/noxfile.py b/noxfile.py index 30eecac2fc..281d851670 100644 --- a/noxfile.py +++ b/noxfile.py @@ -28,8 +28,8 @@ import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black[jupyter]==23.7.0" -ISORT_VERSION = "isort==5.11.0" +BLACK_VERSION = "black[jupyter]==24.8.0" +ISORT_VERSION = "isort==5.13.2" LINT_PATHS = ["."] DEFAULT_PYTHON_VERSION = "3.10" From 36a096749077caa64060e69c85d01ad5009aba09 Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Fri, 11 Oct 2024 09:13:06 -0400 Subject: [PATCH 61/76] feat: Add example for Gemini batch predictions using BigQuery input (#1238) # Description Add example for Gemini batch predictions using 
BigQuery input. We already have a example for batch prediction using Cloud Storage input. --- ...atch_prediction_using_bigquery_input.ipynb | 596 ++++++++++++++++++ 1 file changed, 596 insertions(+) create mode 100644 gemini/batch-prediction/intro_batch_prediction_using_bigquery_input.ipynb diff --git a/gemini/batch-prediction/intro_batch_prediction_using_bigquery_input.ipynb b/gemini/batch-prediction/intro_batch_prediction_using_bigquery_input.ipynb new file mode 100644 index 0000000000..8f082edbe3 --- /dev/null +++ b/gemini/batch-prediction/intro_batch_prediction_using_bigquery_input.ipynb @@ -0,0 +1,596 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ur8xi4C7S06n" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JAPoU8Sm5E6e" + }, + "source": [ + "# Intro to Batch Predictions with the Gemini API using BigQuery input\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
    \n", + " \n", + " \"Google
    Open in Colab\n", + "
    \n", + "
    \n", + " \n", + " \"Google
    Open in Colab Enterprise\n", + "
    \n", + "
    \n", + " \n", + " \"Vertex
    Open in Workbench\n", + "
    \n", + "
    \n", + " \n", + " \"GitHub
    View on GitHub\n", + "
    \n", + "
    " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84f0f73a0f76" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "|Author(s) | [Eric Dong](https://github.com/gericdong) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tvgnzT1CKxrO" + }, + "source": [ + "## Overview\n", + "\n", + "Different from getting online (synchronous) responses, where you are limited to one input request at a time, the batch predictions with the Vertex AI Gemini API allow you to send a large number of multimodal requests to a Gemini model in a single batch request. Then, the model responses asynchronously populate to your storage output location in [Cloud Storage](https://cloud.google.com/storage/docs/introduction) or [BigQuery](https://cloud.google.com/bigquery/docs/storage_overview).\n", + "\n", + "Batch predictions are generally more efficient and cost-effective than online predictions when processing a large number of inputs that are not latency sensitive.\n", + "\n", + "To learn more, see the [Get batch predictions for Gemini](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini) page.\n", + "\n", + "### Objectives\n", + "\n", + "In this tutorial, you learn how to make batch predictions with the Vertex AI Gemini API. 
This tutorial uses **BigQuery** as an input source and an output location.\n", + "\n", + "You will complete the following tasks:\n", + "\n", + "- Preparing batch inputs and an output location\n", + "- Submitting a batch prediction job\n", + "- Retrieving batch prediction results\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "61RBz8LLbxCR" + }, + "source": [ + "## Get started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "No17Cw5hgx12" + }, + "source": [ + "### Install Vertex AI SDK and other required packages\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tFy3H3aPgx12" + }, + "outputs": [], + "source": [ + "%pip install --upgrade --user --quiet google-cloud-aiplatform" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
    \n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EdvJRUWRNGHE" + }, + "source": [ + "## Code Examples" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5303c05f7aa6" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6fc324893334" + }, + "outputs": [], + "source": [ + "from datetime import datetime\n", + "import time\n", + "\n", + "from google.cloud import bigquery\n", + "import vertexai\n", + "from vertexai.generative_models import GenerativeModel\n", + "from vertexai.preview.batch_prediction import BatchPredictionJob" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a49110dbce84" + }, + "source": [ + "### Initialize Vertex AI SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "a4582394ea28" + }, + "outputs": [], + "source": [ + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e43229f3ad4f" + }, + "source": [ + "### Load model\n", + "\n", + "You can find a list of the Gemini models that support batch predictions in the [Multimodal models that support batch predictions](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini#multimodal_models_that_support_batch_predictions) page.\n", + "\n", + "This tutorial uses the Gemini 1.5 Pro (`gemini-1.5-pro-002`) model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cf93d5f0ce00" + }, + "outputs": [], + "source": [ + "MODEL_ID = \"gemini-1.5-pro-002\" # @param {type:\"string\", isTemplate: true}\n", + "\n", + "model = GenerativeModel(MODEL_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1_xZADsak23H" + }, + "source": [ + "### Prepare batch inputs\n", + "\n", + "The input for batch requests specifies the items to send to your model for prediction.\n", + "\n", + "Batch requests for Gemini accept BigQuery storage sources and Cloud Storage sources. You can learn more about the batch input formats for BigQuery and Cloud Storage sources in the [Batch text generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini#prepare_your_inputs) page.\n", + "\n", + "This tutorial uses **BigQuery** as an example. To use a BigQuery table as the input, you must ensure the following:\n", + "\n", + "- The BigQuery dataset must be created in a specific region (e.g. `us-central1`). Multi-region location (e.g. `US`) is not supported.\n", + "- The input table must have a column named `request` in JSON or STRING type.\n", + "- The content in the `request` column must be valid JSON. This JSON data represents your input for the model.\n", + "- The content in the JSON instructions must match the structure of a [GenerateContentRequest](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference).\n", + "- The input table can have columns other than `request`. They are ignored for content generation but included in the output table. The system reserves two column names for output: `response` and `status`. 
These are used to provide information about the outcome of the batch prediction job.\n", + "- Only public YouTube and Cloud Storage bucket URIs in the `fileData` or `file_data` field are supported in batch prediction.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3d4b751610f0" + }, + "source": [ + "This is an example BigQuery table:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uWb8QzxwbH6W" + }, + "outputs": [], + "source": [ + "INPUT_DATA = \"bq://storage-samples.generative_ai.batch_requests_for_multimodal_input\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b7aeba876320" + }, + "source": [ + "You can query the BigQuery table to review the input data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "1414f03a7999" + }, + "outputs": [], + "source": [ + "bq_client = bigquery.Client(project=PROJECT_ID)\n", + "\n", + "bq_table_id = INPUT_DATA.replace(\"bq://\", \"\")\n", + "sql = f\"\"\"\n", + " SELECT *\n", + " FROM {bq_table_id}\n", + " \"\"\"\n", + "\n", + "query_result = bq_client.query(sql)\n", + "\n", + "df = query_result.result().to_dataframe()\n", + "df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T3jQ59mCsXLc" + }, + "source": [ + "### Prepare batch output location\n", + "\n", + "When a batch prediction task completes, the output is stored in the location that you specified in your request.\n", + "\n", + "- The location is in the form of a Cloud Storage or BigQuery URI prefix, for example:\n", + "`gs://path/to/output/data` or `bq://projectId.bqDatasetId`.\n", + "\n", + "- If not specified, `STAGING_BUCKET/gen-ai-batch-prediction` will be used for Cloud Storage source and `bq://PROJECT_ID.gen_ai_batch_prediction.predictions_TIMESTAMP` will be used for BigQuery source.\n", + "\n", + "This tutorial uses a **BigQuery** table as an example.\n", + "\n", + "- You can specify the URI of 
your BigQuery table in `BQ_OUTPUT_URI`, or\n", + "- if it is not specified, this tutorial will create a new dataset `bq://PROJECT_ID.gen_ai_batch_prediction` for you." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OtUodwGXZ7US" + }, + "outputs": [], + "source": [ + "BQ_OUTPUT_URI = \"[your-bigquery-table]\" # @param {type:\"string\"}\n", + "\n", + "if BQ_OUTPUT_URI == \"[your-bigquery-table]\":\n", + " bq_dataset_id = \"gen_ai_batch_prediction\"\n", + "\n", + " # The output table will be created automatically if it doesn't exist\n", + " timestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n", + " bq_table_id = f\"prediction_result_{timestamp}\"\n", + " BQ_OUTPUT_URI = f\"bq://{PROJECT_ID}.{bq_dataset_id}.{bq_table_id}\"\n", + "\n", + " bq_dataset = bigquery.Dataset(f\"{PROJECT_ID}.{bq_dataset_id}\")\n", + " bq_dataset.location = \"us-central1\"\n", + "\n", + " bq_dataset = bq_client.create_dataset(bq_dataset, exists_ok=True, timeout=30)\n", + " print(\n", + " f\"Created BigQuery dataset {bq_client.project}.{bq_dataset.dataset_id} for batch prediction output.\"\n", + " )\n", + "\n", + "print(f\"BigQuery output URI: {BQ_OUTPUT_URI}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "T90CwWDHvonn" + }, + "source": [ + "### Send a batch prediction request\n", + "\n", + "\n", + "You create a batch prediction job using the `BatchPredictionJob.submit()` method. 
To make a batch prediction request, you specify a source model ID, an input source and an output location, either Cloud Storage or BigQuery, where Vertex AI stores the batch prediction results.\n", + "\n", + "To learn more, see the [Batch prediction API](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/batch-prediction-api) page.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3_PxZdTYbMyg" + }, + "outputs": [], + "source": [ + "job = BatchPredictionJob.submit(\n", + " source_model=MODEL_ID, input_dataset=INPUT_DATA, output_uri_prefix=BQ_OUTPUT_URI\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A-Fo_Kd9FYRj" + }, + "source": [ + "Print out the job status and other properties." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DWq7m79PbjG8" + }, + "outputs": [], + "source": [ + "print(f\"Job resource name: {job.resource_name}\")\n", + "print(f\"Model resource name: {job.model_name}\")\n", + "print(f\"Job state: {job.state.name}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7aJaPNBrGPqK" + }, + "source": [ + "### Wait for the batch prediction job to complete\n", + "\n", + "Depending on the number of input items that you submitted, a batch generation task can take some time to complete. You can use the following code to check the job status and wait for the job to complete." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dtJDIXdHc0W-" + }, + "outputs": [], + "source": [ + "# Refresh the job until complete\n", + "while not job.has_ended:\n", + " time.sleep(5)\n", + " job.refresh()\n", + "\n", + "# Check if the job succeeds\n", + "if job.has_succeeded:\n", + " print(\"Job succeeded!\")\n", + "else:\n", + " print(f\"Job failed: {job.error}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XWUgAxL-HjN9" + }, + "source": [ + "### Retrieve batch prediction results\n", + "\n", + "When a batch prediction task is complete, the output of the prediction is stored in the Cloud Storage bucket or BigQuery location that you specified in your request.\n", + "\n", + "- When you are using BigQuery, the output of batch prediction is stored in an output dataset. If you had provided a dataset, the name of the dataset (`BQ_OUTPUT_URI`) is the name you had provided earlier. \n", + "- If you did not provide an output dataset, a default dataset `bq://PROJECT_ID.gen_ai_batch_prediction` will be created for you. The name of the table is formed by appending `predictions_` with the timestamp of when the batch prediction job started.\n", + "\n", + "You can print out the exact output location in the `job.output_location` property." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XECxy_2HdHMm" + }, + "outputs": [], + "source": [ + "print(f\"Job output location: {job.output_location}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NWLQEl3mYKO5" + }, + "source": [ + "You can use the example code below to retrieve predictions and store them into a dataframe.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-jLl3es3dTqB" + }, + "outputs": [], + "source": [ + "bq_table_id = job.output_location.replace(\"bq://\", \"\")\n", + "\n", + "sql = f\"\"\"\n", + " SELECT *\n", + " FROM {bq_table_id}\n", + " \"\"\"\n", + "\n", + "query_result = bq_client.query(sql)\n", + "\n", + "df = query_result.result().to_dataframe()\n", + "df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2a4e033321ad" + }, + "source": [ + "## Cleaning up\n", + "\n", + "Clean up resources created in this notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZNCyIKIrdPJY" + }, + "outputs": [], + "source": [ + "# Delete the batch prediction job\n", + "job.delete()" + ] + } + ], + "metadata": { + "colab": { + "name": "intro_batch_prediction_using_bigquery_input.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From fde9bee89ff0c8ed583e6f162130b7d823364fae Mon Sep 17 00:00:00 2001 From: nhootan <103317089+nhootan@users.noreply.github.com> Date: Fri, 11 Oct 2024 13:01:48 -0400 Subject: [PATCH 62/76] fix: Remove dependency on colab from prompt optimizer. 
(#1241) Co-authored-by: hootan Co-authored-by: Owl Bot Co-authored-by: Holt Skinner --- .github/actions/spelling/allow.txt | 2 ++ gemini/prompts/prompt_optimizer/vapo_lib.py | 3 --- .../prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 084936e4cc..6ec7e7edcc 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -359,6 +359,7 @@ cer cfbundle chatbots chromadb +cimg claude clickable cmap @@ -378,6 +379,7 @@ constexpr corpuses csa cse +ctd cupertino dask dataframe diff --git a/gemini/prompts/prompt_optimizer/vapo_lib.py b/gemini/prompts/prompt_optimizer/vapo_lib.py index 2365f801cc..b60af6b764 100644 --- a/gemini/prompts/prompt_optimizer/vapo_lib.py +++ b/gemini/prompts/prompt_optimizer/vapo_lib.py @@ -21,15 +21,12 @@ from IPython.core.display import DisplayHandle from IPython.display import HTML, display from google.cloud import aiplatform, storage -from google.colab import output import ipywidgets as widgets import jinja2 import jinja2.meta import pandas as pd from tensorflow.io import gfile -output.enable_custom_widget_manager() - def is_target_required_metric(eval_metric: str) -> bool: """Check if the metric requires the target label.""" diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb index a17a30b1c1..d56b179d73 100644 --- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb +++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb @@ -291,8 +291,6 @@ "import json\n", "import time\n", "\n", - "from google.colab import auth\n", - "\n", "timestamp = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n", "display_name = f\"pt_{timestamp}\"\n", "\n", From 115adf9370133395a89efe734b2829c26a3ab294 Mon Sep 17 00:00:00 2001 From: eliasecchig 
<115624100+eliasecchig@users.noreply.github.com> Date: Fri, 11 Oct 2024 19:51:44 +0200 Subject: [PATCH 63/76] feat: add e2e gen ai app starter pack (#1225) # Description Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Follow the [`CONTRIBUTING` Guide](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md). - [x] You are listed as the author in your notebook or README file. - [x] Your account is listed in [`CODEOWNERS`](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/.github/CODEOWNERS) for the file(s). - [x] Make your Pull Request title in the specification. - [x] Ensure the tests and linter pass (Run `nox -s format` from the repository root to format). - [x] Appropriate docs were updated (if necessary) --------- Co-authored-by: Owl Bot --- .github/CODEOWNERS | 1 + .github/actions/spelling/allow.txt | 10 + .github/linters/.python-lint | 2 +- ...gen_ai_app_starter_kit__lint_and_test.yaml | 29 + .gitignore | 10 + gemini/sample-apps/README.md | 13 +- .../e2e-gen-ai-app-starter-pack/.gitignore | 194 + .../CONTRIBUTING.md | 49 + .../e2e-gen-ai-app-starter-pack/Dockerfile | 17 + .../e2e-gen-ai-app-starter-pack/Makefile | 21 + .../e2e-gen-ai-app-starter-pack/README.md | 202 + .../e2e-gen-ai-app-starter-pack/app/README.md | 65 + .../app/__init__.py | 0 .../e2e-gen-ai-app-starter-pack/app/chain.py | 40 + .../app/eval/data/chats.yaml | 42 + .../app/eval/utils.py | 209 + .../app/patterns/custom_rag_qa/chain.py | 141 + .../app/patterns/custom_rag_qa/templates.py | 56 + .../patterns/custom_rag_qa/vector_store.py | 53 + .../patterns/langgraph_dummy_agent/chain.py | 73 + .../e2e-gen-ai-app-starter-pack/app/server.py | 121 + .../app/utils/__init__.py | 0 .../app/utils/input_types.py | 56 + .../app/utils/output_types.py | 142 + .../app/utils/tracing.py | 150 + .../deployment/README.md | 116 + .../deployment/cd/deploy-to-prod.yaml | 35 + 
.../deployment/cd/staging.yaml | 137 + .../deployment/ci/pr_checks.yaml | 27 + .../deployment/terraform/apis.tf | 49 + .../deployment/terraform/artifact_registry.tf | 8 + .../deployment/terraform/build_triggers.tf | 80 + .../deployment/terraform/dev/iam.tf | 20 + .../deployment/terraform/dev/log_sinks.tf | 49 + .../terraform/dev/service_accounts.tf | 5 + .../deployment/terraform/dev/storage.tf | 29 + .../deployment/terraform/dev/variables.tf | 64 + .../deployment/terraform/dev/vars/env.tfvars | 16 + .../deployment/terraform/iam.tf | 81 + .../deployment/terraform/log_sinks.tf | 58 + .../deployment/terraform/service_accounts.tf | 15 + .../deployment/terraform/storage.tf | 37 + .../deployment/terraform/variables.tf | 125 + .../deployment/terraform/vars/env.tfvars | 31 + .../notebooks/getting_started.ipynb | 1160 +++ .../e2e-gen-ai-app-starter-pack/poetry.lock | 7864 +++++++++++++++++ .../pyproject.toml | 108 + .../streamlit/side_bar.py | 171 + .../streamlit/streamlit_app.py | 253 + .../streamlit/style/app_markdown.py | 37 + .../streamlit/utils/chat_utils.py | 69 + .../streamlit/utils/local_chat_history.py | 121 + .../streamlit/utils/message_editing.py | 58 + .../streamlit/utils/multimodal_utils.py | 218 + .../streamlit/utils/stream_handler.py | 267 + .../streamlit/utils/title_summary.py | 68 + .../patterns/test_langgraph_dummy_agent.py | 62 + .../tests/integration/patterns/test_rag_qa.py | 63 + .../tests/integration/test_chain.py | 53 + .../tests/integration/test_server_e2e.py | 177 + .../tests/load_test/.results/.placeholder | 0 .../tests/load_test/README.md | 79 + .../tests/load_test/load_test.py | 85 + .../tests/unit/test_server.py | 148 + .../unit/test_utils/test_tracing_exporter.py | 143 + 65 files changed, 13875 insertions(+), 7 deletions(-) create mode 100644 .github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/.gitignore create mode 100644 
gemini/sample-apps/e2e-gen-ai-app-starter-pack/CONTRIBUTING.md create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/Dockerfile create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/Makefile create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/README.md create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/__init__.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/chain.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/data/chats.yaml create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/utils.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/chain.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/templates.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/vector_store.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/langgraph_dummy_agent/chain.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/server.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/__init__.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/input_types.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/output_types.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/tracing.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/README.md create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/deploy-to-prod.yaml create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/staging.yaml create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/ci/pr_checks.yaml create mode 100644 
gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/apis.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/artifact_registry.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/build_triggers.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/iam.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/service_accounts.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/variables.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/vars/env.tfvars create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/iam.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/service_accounts.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/variables.tf create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/vars/env.tfvars create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/notebooks/getting_started.ipynb create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/side_bar.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/streamlit_app.py create mode 100644 
gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/style/app_markdown.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/chat_utils.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/local_chat_history.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/message_editing.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/multimodal_utils.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/stream_handler.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/title_summary.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_langgraph_dummy_agent.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_rag_qa.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_chain.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_server_e2e.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/.results/.placeholder create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/README.md create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/load_test.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_server.py create mode 100644 gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_utils/test_tracing_exporter.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1a5743e4c9..1ad39d9748 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -73,3 +73,4 @@ /generative-ai/open-models/serving/vertex_ai_text_generation_inference_gemma.ipynb @alvarobartt @philschmid @pagezyhf @jeffboudier 
/generative-ai/gemini/use-cases/applying-llms-to-data/semantic-search-in-bigquery/stackoverflow_questions_semantic_search.ipynb @sethijaideep @GoogleCloudPlatform/generative-ai-devrel /generative-ai/gemini/use-cases/retrieval-augmented-generation/raw_with_bigquery.ipynb @jeffonelson @GoogleCloudPlatform/generative-ai-devrel +/generative-ai/gemini/sample-apps/e2e-gen-ai-app-starter-pack @eliasecchig @lspatarog @GoogleCloudPlatform/generative-ai-devrel diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 6ec7e7edcc..4da058dced 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -153,6 +153,7 @@ LSum LTRB LUVBPTK Ladhak +LangGraph Lego Llion Logrus @@ -202,6 +203,7 @@ Oort PDFs PEFT PLOTLYENV +PYINK Parmar Persero Phaidon @@ -270,6 +272,7 @@ Testables Tetsuo Tianli Topolino +Traceloop Trapp Tribbiani Tricyle @@ -359,6 +362,7 @@ cer cfbundle chatbots chromadb +cicd cimg claude clickable @@ -368,6 +372,7 @@ codebases codefile codelab codelabs +codespell colab coldline coloraxis @@ -429,6 +434,7 @@ figsize fillmode fillna firestore +fixmycar flac floormat fmeasure @@ -475,6 +481,7 @@ gsutil gtk guanciale gunicorn +hadolint hashtag hashtags hdlr @@ -678,6 +685,7 @@ terraform textno tfhub tfidf +tfvars tgz thelook tiktoken @@ -685,7 +693,9 @@ timechart titlebar tobytes toself +toset tqdm +traceloop tritan tsv ubuntu diff --git a/.github/linters/.python-lint b/.github/linters/.python-lint index a04c41e380..b6d9e03f2d 100644 --- a/.github/linters/.python-lint +++ b/.github/linters/.python-lint @@ -1,2 +1,2 @@ [MESSAGES CONTROL] -disable=E0401,C0301 +disable=E0401,C0301,R0903,R1710,C0114,R0915,W1514,W1203,I1101 diff --git a/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml b/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml new file mode 100644 index 0000000000..419c68f22a --- /dev/null +++ b/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml @@ -0,0 
+1,29 @@ +name: End-to-end Gen AI starter kit - Unit test + +on: + push: + paths: + - "gemini/sample-apps/e2e-gen-ai-app-starter-pack/**" + pull_request: + paths: + - "gemini/sample-apps/e2e-gen-ai-app-starter-pack/**" + +jobs: + unit-test: + runs-on: ubuntu-latest + defaults: + run: + working-directory: gemini/sample-apps/e2e-gen-ai-app-starter-pack + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Install Poetry and dependencies + run: | + pip install poetry==1.8.3 --user + python -m poetry install + - name: Run tests + run: | + poetry run pytest tests/unit diff --git a/.gitignore b/.gitignore index f516bbe605..5171010bc8 100644 --- a/.gitignore +++ b/.gitignore @@ -145,6 +145,16 @@ dmypy.json language/examples/prompt-design/train.csv README-TOC*.md +## gemini/sample-apps/e2e-gen-ai-app-starter-pack specific +gemini/sample-apps/e2e-gen-ai-app-starter-pack/**/.persist_vector_store +gemini/sample-apps/e2e-gen-ai-app-starter-pack/.saved_chats/ +gemini/sample-apps/e2e-gen-ai-app-starter-pack/.streamlit_chats/ +gemini/sample-apps/e2e-gen-ai-app-starter-pack/**/my_env.tfvars +gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/.results +gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/.results + +# Terraform +terraform.tfstate** .terraform* .Terraform* diff --git a/gemini/sample-apps/README.md b/gemini/sample-apps/README.md index e0c7648622..e7620ff295 100644 --- a/gemini/sample-apps/README.md +++ b/gemini/sample-apps/README.md @@ -10,9 +10,10 @@ We provide instructions for setting up your environment in [Cloud Shell](https:/ ## Sample Applications -| Description | Application Name | Technologies Used | -| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | -------------------------------------------------------------------------- | -| Develop a Gemini application using [Streamlit](https://streamlit.io/) framework and Vertex AI Gemini API model. | [gemini-streamlit-cloudrun](gemini-streamlit-cloudrun) | Cloud Run, Streamlit, Python | -| Deploy a RAG + Gemini sample application to troubleshoot your car using the owner's manual. | [fixmycar/](fixmycar/) | Chat, Grounding, RAG, Java, Streamlit | -| Try Gemini image recognition in `bash` and see Text-to-Speech read the description to you in ~any language. All from CLI! | [image-bash-jam/](image-bash-jam/) | Text-to-Speech, Bash | -| This demo showcases how you can combine the data and documents you already have and the skills you already know with the power of [AlloyDB AI](https://cloud.google.com/alloydb/ai?hl=en), [Vertex AI](https://cloud.google.com/vertex-ai?hl=en), [Cloud Run](https://cloud.google.com/run?hl=en), and [Cloud Functions](https://cloud.google.com/functions?hl=en) to build trustworthy Gen AI features into your existing applications. 
| [GenWealth](genwealth/) | Vertex AI, AlloyDB, Document AI, Cloud Run, Cloud Functions, Cloud Storage | +| Description | Application Name | Technologies Used | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | --------------------------------------------------------------------------- | +| Develop a Gemini application using [Streamlit](https://streamlit.io/) framework and Vertex AI Gemini API model. | [gemini-streamlit-cloudrun](gemini-streamlit-cloudrun) | Cloud Run, Streamlit, Python | +| Deploy a RAG + Gemini sample application to troubleshoot your car using the owner's manual. | [fixmycar/](fixmycar/) | Chat, Grounding, RAG, Java, Streamlit | +| Try Gemini image recognition in `bash` and see Text-to-Speech read the description to you in ~any language. All from CLI! | [image-bash-jam/](image-bash-jam/) | Text-to-Speech, Bash | +| This demo showcases how you can combine the data and documents you already have and the skills you already know with the power of [AlloyDB AI](https://cloud.google.com/alloydb/ai?hl=en), [Vertex AI](https://cloud.google.com/vertex-ai?hl=en), [Cloud Run](https://cloud.google.com/run?hl=en), and [Cloud Functions](https://cloud.google.com/functions?hl=en) to build trustworthy Gen AI features into your existing applications. | [GenWealth](genwealth/) | Vertex AI, AlloyDB, Document AI, Cloud Run, Cloud Functions, Cloud Storage | +| End-to-end Gen AI App Starter pack: This folder provides a template starter pack for building a Generative AI application on Google Cloud. 
It provides a comprehensive set of resources to guide you through the entire development process, from prototype to production. | [e2e-gen-ai-app-starter-pack](e2e-gen-ai-app-starter-pack/) | Vertex AI, FastAPI, LangChain, Cloud Run, Cloud Build, Terraform, Streamlit | diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/.gitignore b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/.gitignore new file mode 100644 index 0000000000..bf43be5cf5 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/.gitignore @@ -0,0 +1,194 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*.pyc +*$py.class +**/dist +/tmp +/out-tsc +/bazel-out + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +Pipfile.lock +Pipfile + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +.venv* +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# macOS +.DS_Store + +# PyCharm +.idea + +# User-specific files +.terraform* +.Terraform* + + +tmp* + +# Node +**/node_modules +npm-debug.log +yarn-error.log + +# IDEs and editors +.idea/ +.project +.classpath +.c9/ +*.launch +.settings/ +*.sublime-workspace + +# Visual Studio Code +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +.history/* + +# Miscellaneous +**/.angular/* +/.angular/cache +.sass-cache/ +/connect.lock +/coverage +/libpeerconnection.log +testem.log +/typings + +# System files +.DS_Store +Thumbs.db +*.vscode* + +.persist_vector_store +tests/load_test/.results/*.html +tests/load_test/.results/*.csv +locust_env +my_env.tfvars +.streamlit_chats +.saved_chats diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/CONTRIBUTING.md b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/CONTRIBUTING.md new file mode 100644 index 0000000000..34f35f2955 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/CONTRIBUTING.md @@ -0,0 +1,49 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this sample. There are +just a few small guidelines you need to follow. 
+ +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License Agreement. You (or your employer) retain the copyright to your contribution; this simply gives us permission to use and redistribute your contributions as part of the project. Head over to [Google Developers CLA](https://cla.developers.google.com/) to see your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. + +## Community Guidelines, Code Reviews, Contributor Guide + +Please refer to the [root repository CONTRIBUTING.md file](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/CONTRIBUTING.md) for Community Guidelines, Code Reviews, Contributor Guide, or specific guidance for Google Employees. + +## Code Quality Checks + +To ensure code quality, we utilize automated checks. Before submitting a pull request, please run the following commands locally: + +```bash +poetry install --with streamlit,jupyter,lint +``` + +This installs development dependencies, including linting tools. + +Then, execute the following Make command: + +```bash +make lint +``` + +This command runs the following linters to check for code style, potential errors, and type hints: + +- **codespell**: Detects common spelling mistakes in code and documentation. +- **pylint**: Analyzes code for errors, coding standards, and potential problems. +- **flake8**: Enforces style consistency and checks for logical errors. +- **mypy**: Performs static type checking to catch type errors before runtime. +- **black**: Automatically formats Python code to adhere to the PEP 8 style guide. + +```bash +make test +``` + +This command runs the test suite using pytest, covering both unit and integration tests: + +- **`poetry run pytest tests/unit`**: Executes unit tests located in the `tests/unit` directory. 
+- **`poetry run pytest tests/integration`**: Executes integration tests located in the `tests/integration` directory. + +Your pull request will also be automatically checked by these tools using GitHub Actions. Ensuring your code passes these checks locally will help expedite the review process. diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/Dockerfile b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/Dockerfile new file mode 100644 index 0000000000..c103b88d30 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3.11-slim + +RUN pip install --no-cache-dir poetry==1.6.1 + +RUN poetry config virtualenvs.create false + +WORKDIR /code + +COPY ./pyproject.toml ./README.md ./poetry.lock* ./ + +COPY ./app ./app + +RUN poetry install --no-interaction --no-ansi --no-dev + +EXPOSE 8080 + +CMD ["uvicorn", "app.server:app", "--host", "0.0.0.0", "--port", "8080"] \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/Makefile b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/Makefile new file mode 100644 index 0000000000..c54f389e71 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/Makefile @@ -0,0 +1,21 @@ +test: + poetry run pytest tests/unit && poetry run pytest tests/integration + +playground: + poetry run uvicorn app.server:app --host 0.0.0.0 --port 8000 --reload & poetry run streamlit run streamlit/streamlit_app.py --browser.serverAddress=localhost --server.enableCORS=false --server.enableXsrfProtection=false + +backend: + poetry run uvicorn app.server:app --host 0.0.0.0 --port 8000 --reload + +frontend: + poetry run streamlit run streamlit/streamlit_app.py --browser.serverAddress=localhost --server.enableCORS=false --server.enableXsrfProtection=false + +load_test: + poetry run locust -f tests/load_test/load_test.py -H $RUN_SERVICE_URL --headless -t 30s -u 60 -r 2 --csv=tests/load_test/.results/results --html=tests/load_test/.results/report.html + +lint: + 
poetry run codespell + poetry run flake8 . + poetry run pylint . + poetry run mypy . + poetry run black . diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md new file mode 100644 index 0000000000..dfadbfaef2 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md @@ -0,0 +1,202 @@ +# 🚀 End-to-End Gen AI App Starter Pack 🚀 + +> **From Prototype to Production in Minutes.** + +| | | +| ------- | ---------------------------------------------------------------------------------------------- | +| Authors | [Elia Secchi](https://github.com/eliasecchig), [Lorenzo Spataro](https://github.com/lspataroG) | + +This repository provides a template starter pack for building a Generative AI application on Google Cloud. + +We provide a comprehensive set of resources to guide you through the entire development process, from prototype to production. + +This is a suggested approach, and **you can adapt it to fit your specific needs and preferences**. There are multiple ways to build Gen AI applications on Google Cloud, and this template serves as a starting point and example. + +## High-Level Architecture + +This starter pack covers all aspects of Generative AI app development, from prototyping and evaluation to deployment and monitoring. + +![High Level Architecture](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/high_level_architecture.png "Architecture") + +## What's in this Starter Pack? + +
    +A prod-ready FastAPI server + +| Description | Visualization | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| The starter pack includes a production-ready FastAPI server with a real-time chat interface, event streaming, and auto-generated docs. It is designed for scalability and easy integration with monitoring tools. | ![FastAPI docs](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/fastapi_docs.png) | + +
    + +
    +Ready-to-use AI patterns + +| Description | Visualization | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| Start with a variety of common patterns: this repository offers examples including a basic conversational chain, a production-ready RAG (Retrieval-Augmented Generation) chain developed with Python, and a LangGraph agent implementation. Use them in the application by changing one line of code. See the [Readme](app/README.md) for more details. | ![patterns available](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/patterns_available.png) | + +
    + +
    +Integration with Vertex AI Evaluation and Experiments + +| Description | Visualization | +| ---------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| The repository showcases how to evaluate Generative AI applications using tools like Vertex AI rapid eval SDK and Vertex AI Experiments. | ![Vertex AI Rapid Eval](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/vertex_ai_rapid_eval.png) | + +
    + +
    +Unlock Insights with Google Cloud Native Tracing & Logging + +| Description | Visualization | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| Seamlessly integrate with OpenTelemetry, Cloud Trace, Cloud Logging, and BigQuery for comprehensive data collection, and log every step of your Gen AI application to unlock powerful insights. | ![Tracing Preview](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/tracing_preview.png) | + +
    + +
    +Monitor Responses from the application + +| Description | Visualization | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| Monitor your Generative AI application's performance. We provide a Looker Studio [dashboard](https://lookerstudio.google.com/u/0/reporting/fa742264-4b4b-4c56-81e6-a667dd0f853f) to monitor application conversation statistics and user feedback. | ![Dashboard1](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/dashboard_1.png) | +| We can also drill down to individual conversations and view the messages exchanged. | ![Dashboard2](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/dashboard_2.png) | + +
    + +
    +CICD and Terraform + +| Description | Visualization | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| Streamline your deployments with Cloud Build. Enhance reliability through automated testing. The template includes implementation of unit, integration, and load tests, and a set of Terraform resources for you to set up your own Google Cloud project in a matter of minutes. | ![cicd](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/cicd.png) | + +
    + +
    +A comprehensive UI Playground + +| Description | Visualization | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| Experiment with your Generative AI application in a feature-rich playground, including chat curation, user feedback collection, multimodal input, and more! | ![Streamlit View](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/streamlit_view.png) | + +
    + +## Getting Started + +### Prerequisites + +- Python 3.10+ +- Google Cloud SDK installed and configured +- [Poetry](https://python-poetry.org/docs/#installation) for dependency management + +### Download the starter pack + +```bash +gsutil cp gs://e2e-gen-ai-app-starter-pack/app-starter-pack.zip . && unzip app-starter-pack.zip && cd app-starter-pack +``` + +Use the downloaded folder as a starting point for your own Generative AI application. + +### Installation + +Install required packages using Poetry: + +```bash +poetry install --with streamlit,jupyter +``` + +### Setup + +Set your default Google Cloud project and region: + +```bash +export PROJECT_ID="YOUR_PROJECT_ID" +export REGION="YOUR_REGION" +gcloud config set project $PROJECT_ID +gcloud config set region $REGION +``` + +## Commands + +| Command | Description | +| -------------------- | ------------------------------------------------------------------------------------------- | +| `make playground` | Start the backend and frontend for local playground execution | +| `make test` | Run unit and integration tests | +| `make load_test` | Execute load tests (see [tests/load_test/README.md](tests/load_test/README.md) for details) | +| `poetry run jupyter` | Launch Jupyter notebook | + +For full command options and usage, refer to the [Makefile](Makefile). + +## Usage + +1. **Prototype Your Chain:** Build your Generative AI application using different methodologies and frameworks. Use Vertex AI Evaluation for assessing the performance of your application and its chain of steps. **See [`notebooks/getting_started.ipynb`](notebooks/getting_started.ipynb) for a tutorial to get started building and evaluating your chain.** +2. **Integrate into the App:** Import your chain into the app. Edit the `app/chain.py` file to add your chain. +3. **Playground Testing:** Explore your chain's functionality using the Streamlit playground. 
Take advantage of the comprehensive playground features, such as chat history management, user feedback mechanisms, support for various input types, and additional capabilities. You can run the playground locally with the `make playground` command. +4. **Deploy with CI/CD:** Configure and trigger the CI/CD pipelines. Edit tests if needed. See the [deployment section](#deployment) below for more details. +5. **Monitor in Production:** Track performance and gather insights using Cloud Logging, Tracing, and the Looker Studio dashboard. Use the gathered data to iterate on your Generative AI application. + +## Deployment + +### Dev Environment + +You can test deployment towards a Dev Environment using the following command: + +```bash +gcloud run deploy genai-app-sample --source . --project YOUR_DEV_PROJECT_ID +``` + +The repository includes a Terraform configuration for the setup of the Dev Google Cloud project. +See [deployment/README.md](deployment/README.md) for instructions. + +### Production Deployment with Terraform + +![Deployment Workflow](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/deployment_workflow.png) + +**Quick Start:** + +1. Enable required APIs in the CI/CD project. + + ```bash + gcloud config set project YOUR_CI_CD_PROJECT_ID + gcloud services enable serviceusage.googleapis.com cloudresourcemanager.googleapis.com cloudbuild.googleapis.com secretmanager.googleapis.com + ``` + +2. Create a Git repository (GitHub, GitLab, Bitbucket). +3. Connect to Cloud Build following [Cloud Build Repository Setup](https://cloud.google.com/build/docs/repositories#whats_next). +4. Configure [`deployment/terraform/vars/env.tfvars`](deployment/terraform/vars/env.tfvars) with your project details. +5. Deploy infrastructure: + + ```bash + cd deployment/terraform + terraform init + terraform apply --var-file vars/env.tfvars + ``` + +6. Perform a commit and push to the repository to see the CI/CD pipelines in action! 
+ +For detailed deployment instructions, refer to [deployment/README.md](deployment/README.md). + +## Contributing + +Contributions are welcome! See the [Contributing Guide](CONTRIBUTING.md). + +## Feedback + +We value your input! Your feedback helps us improve this starter pack and make it more useful for the community. + +### Getting Help + +If you encounter any issues or have specific suggestions, please first consider [raising an issue](https://github.com/GoogleCloudPlatform/generative-ai/issues) on our GitHub repository. + +### Share Your Experience + +For other types of feedback, or if you'd like to share a positive experience or success story using this starter pack, we'd love to hear from you! You can reach out to us at [e2e-gen-ai-app-starter-pack@google.com](mailto:e2e-gen-ai-app-starter-pack@google.com). + +Thank you for your contributions! + +## Disclaimer + +This repository is for demonstrative purposes only and is not an officially supported Google product. diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/README.md b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/README.md new file mode 100644 index 0000000000..7f96c1a27c --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/README.md @@ -0,0 +1,65 @@ +# Sample Chatbot Application + +This folder implements a chatbot application using FastAPI, and Google Cloud services. It supports multiple conversation patterns and can be easily extended with new chains. + +## Folder Structure + +```plaintext +. +├── server.py # Main FastAPI server +├── chain.py # Default chain implementation +├── patterns/ # Conversation pattern implementations +│ ├── custom_rag_qa/ +│ └── langgraph_dummy_agent/ +├── utils/ # Utility functions and classes +└── eval/ # Evaluation tools and data +``` + +## Generative AI Application Patterns + +### 1. Default Chain + +The default chain is a simple conversational bot that produces recipes based on user questions. + +### 2. 
Custom RAG QA + +A RAG (Retrieval-Augmented Generation) chain using Python for orchestration and base LangChain components. The chain demonstrates how to create a production-grade application with full control over the orchestration process. + +This approach offers maximum flexibility in the orchestration of steps and allows for seamless integration with other SDK frameworks such as [Vertex AI SDK](https://cloud.google.com/vertex-ai/docs/python-sdk/use-vertex-ai-python-sdk) and [LangChain](https://python.langchain.com/), retaining the support to emit `astream_events` [API compatible events](https://python.langchain.com/docs/how_to/streaming/#using-stream-events). + +### 3. LangGraph Dummy Agent + +A simple agent implemented using LangGraph, a framework for building agent and multi-agent workflows. + +### Switching Between Patterns + +To switch between different patterns, modify the import statement in `server.py`. + +All chains have the same interface, allowing for seamless swapping without changes to the Streamlit frontend. + +## Monitoring and Observability + +![monitoring_flow](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/monitoring_flow.png) + +### Trace and Log Capture + +This application utilizes [OpenTelemetry](https://opentelemetry.io/) and [OpenLLMetry](https://github.com/traceloop/openllmetry) for comprehensive observability, emitting events to Google Cloud Trace and Google Cloud Logging. Every interaction with LangChain and VertexAI is instrumented (see [`server.py`](server.py)), enabling detailed tracing of request flows throughout the application. + +Leveraging the [CloudTraceSpanExporter](https://cloud.google.com/python/docs/reference/spanner/latest/opentelemetry-tracing), the application captures and exports tracing data. 
To address the limitations of Cloud Trace ([256-byte attribute value limit](https://cloud.google.com/trace/docs/quotas#limits_on_spans)) and [Cloud Logging](https://cloud.google.com/logging/quotas) ([256KB log entry size](https://cloud.google.com/logging/quotas)), a custom extension of the CloudTraceSpanExporter is implemented in [`app/utils/tracing.py`](app/utils/tracing.py). + +This extension enhances observability by: + +- Creating a corresponding Google Cloud Logging entry for every captured event. +- Automatically storing event data in Google Cloud Storage when the payload exceeds 256KB. + +Logged payloads are associated with the original trace, ensuring seamless access from the Cloud Trace console. + +### Log Router + +Events are forwarded to BigQuery through a [log router](https://cloud.google.com/logging/docs/routing/overview) for long-term storage and analysis. The deployment of the log router is done via Terraform code in [deployment/terraform](../deployment/terraform). + +### Looker Studio Dashboard + +Once the data is written to BigQuery, it can be used to populate a [Looker Studio dashboard](https://lookerstudio.google.com/c/reporting/fa742264-4b4b-4c56-81e6-a667dd0f853f/page/tEnnC). + +This dashboard, offered as a template, provides a starting point for building custom visualizations on top of the data being captured. diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/__init__.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/chain.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/chain.py new file mode 100644 index 0000000000..d367bd1ad7 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/chain.py @@ -0,0 +1,40 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_google_vertexai import ChatVertexAI + +llm = ChatVertexAI( + model_name="gemini-1.5-flash-002", + temperature=0, + max_output_tokens=1024, +) + + +template = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are a knowledgeable culinary assistant specializing in providing " + "detailed cooking recipes. Your responses should be informative, engaging, " + "and tailored to the user's specific requests. Include ingredients, " + "step-by-step instructions, cooking times, and any helpful tips or " + "variations. If asked about dietary restrictions or substitutions, offer " + "appropriate alternatives.", + ), + MessagesPlaceholder(variable_name="messages"), + ] +) + +chain = template | llm diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/data/chats.yaml b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/data/chats.yaml new file mode 100644 index 0000000000..1eb4f9bdb6 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/data/chats.yaml @@ -0,0 +1,42 @@ +- messages: + - type: human + content: Hi + - type: ai + content: Hi, how can I help you? + - type: human + content: I'm looking for a recipe for a healthy dinner. Do you have any recommendations? + - type: ai + content: Sure, I can help you with that. What are your dietary restrictions? Are you vegetarian, vegan, gluten-free, or anything else? + - type: human + content: I'm not vegetarian or vegan, but I am gluten-free.
+ - type: ai + content: "Okay, I'll keep that in mind. Here are a few recipes that I think you might like: * **Grilled Salmon with Roasted Vegetables:** This is a delicious and healthy recipe that is perfect for a weeknight meal. The salmon is grilled to perfection and the roasted vegetables add a touch of sweetness. * **Chicken Stir-Fry:** This is a quick and easy stir-fry that is perfect for busy weeknights. The chicken is cooked with vegetables and a light sauce. * **Lentil Soup:** This is a hearty and healthy soup that is perfect for a cold winter day. The lentils are packed with protein and fiber, and the soup is also gluten-free." + - type: human + content: Those all sound great! I think I'm going to try the grilled salmon with roasted vegetables. + - type: ai + content: That's a great choice! I hope you enjoy it. + - type: human + content: Thanks for your help! + - type: ai + content: You're welcome! Is there anything else I can help you with today? + - type: human + content: No, that's all. Thanks again! + - type: ai + content: You're welcome! Have a great day! +- messages: + - type: human + content: Hi + - type: ai + content: Hi, how can I help you? + - type: human + content: I'm looking for a recipe for a romantic dinner. Do you have any recommendations? + - type: ai + content: Sure, I can help you with that. What are your dietary restrictions? Are you vegetarian, vegan, gluten-free, or anything else? + - type: human + content: I'm vegetarian. + - type: ai + content: "Sure, I can help you find a healthy vegetarian dinner recipe. Here are a few ideas: * **Burnt aubergine veggie chilli:** This is a hearty and flavorful dish that is packed with nutrients. The roasted aubergine gives it a smoky flavor, and the lentils and beans add protein and fiber. * **Simple mushroom curry:** This is a quick and easy curry that is perfect for a weeknight meal. The mushrooms are cooked in a creamy sauce with spices, and the whole dish is ready in under 30 minutes.
* **Vegetarian enchiladas:** This is a classic Mexican dish that is easy to make vegetarian. The enchiladas are filled with a variety of vegetables, and they are topped with a delicious sauce. * **Braised sesame tofu:** This is a flavorful and satisfying dish that is perfect for a cold night. The tofu is braised in a sauce with sesame, ginger, and garlic, and it is served over rice or noodles. * **Roast garlic & tahini spinach:** This is a light and healthy dish that is perfect for a spring or summer meal. The spinach is roasted with garlic and tahini, and it is served with a side of pita bread. These are just a few ideas to get you started. There are many other great vegetarian dinner recipes out there, so you are sure to find something that you will enjoy." + - type: human + content: Those all sound great! I like the Burnt aubergine veggie chilli + - type: ai + content: That's a great choice! I hope you enjoy it. diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/utils.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/utils.py new file mode 100644 index 0000000000..deb735fb02 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/eval/utils.py @@ -0,0 +1,209 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from concurrent.futures import ThreadPoolExecutor +from functools import partial +import glob +import logging +from typing import Any, Callable, Dict, Iterator, List + +import nest_asyncio +import pandas as pd +from tqdm import tqdm +import yaml + +nest_asyncio.apply() + + +def load_chats(path: str) -> List[Dict[str, Any]]: + """ + Loads a list of chats from a directory or file. + + Args: + path (str): The path to the directory or file containing the chats. + + Returns: + List[Dict[str, Any]]: A list of chats. + """ + + chats: List[Dict[str, Any]] = [] + for file_path in glob.glob(path): + with open(file_path) as f: + chats_in_file = yaml.safe_load(f) + chats = chats + chats_in_file + return chats + + +def pairwise(iterable: List[Any]) -> Iterator[tuple[Any, Any]]: + """Creates an iterable with tuples paired together + e.g s -> (s0, s1), (s2, s3), (s4, s5), ... + """ + a = iter(iterable) + return zip(a, a) + + +def _process_conversation(row: Dict[str, List[str]]) -> List[Dict[str, Any]]: + """Processes a single conversation row to extract messages and build conversation history.""" + conversation_history: List[Dict] = [] + messages = [] + for human_message, ai_message in pairwise(row["messages"]): + messages.append( + { + "human_message": human_message, + "ai_message": ai_message, + "conversation_history": conversation_history.copy(), + } + ) + conversation_history.extend([human_message, ai_message]) + return messages + + +def generate_multiturn_history(df: pd.DataFrame) -> pd.DataFrame: + """Processes a DataFrame of conversations to create a multi-turn history. + + This function iterates through a DataFrame where each row represents a conversation. + It extracts human and AI messages from the "messages" column and structures them + into a new DataFrame. Each row in the output DataFrame represents a single turn + in a conversation, including the human message, AI message, and the conversation + history up to that point. 
+ + Args: + df (pd.DataFrame): A DataFrame where each row represents a conversation. + The DataFrame should have a column named "messages" containing + a list of alternating human and AI messages. + + Returns: + pd.DataFrame: A DataFrame where each row represents a single turn in a conversation. + The DataFrame has the following columns: + - human_message: The human message in that turn. + - ai_message: The AI message in that turn. + - conversation_history: A list of all messages in the conversation + up to and including the current turn. + """ + processed_messages = df.apply(_process_conversation, axis=1).explode().tolist() + return pd.DataFrame(processed_messages) + + +def generate_message(row: tuple[int, Dict[str, Any]], runnable: Any) -> Dict[str, Any]: + """Generates a response message using a given runnable and updates the row dictionary. + + This function takes a row dictionary containing message data and a runnable object. + It extracts conversation history and the current human message from the row, + then uses the runnable to generate a response based on the conversation history. + The generated response content and usage metadata are then added to the original + message dictionary within the row. + + Args: + row (tuple[int, Dict[str, Any]]): A tuple containing the index and a dictionary + with message data, including: + - "conversation_history" (List[str]): Optional. List of previous + messages + in the conversation. + - "human_message" (str): The current human message. + runnable (Any): A runnable object that takes a dictionary with a "messages" key + and returns a response object with "content" and + "usage_metadata" attributes. + + Returns: + Dict[str, Any]: The updated row dictionary with the generated response added to the message. + The message will now contain: + - "response" (str): The generated response content. + - "response_obj" (Any): The usage metadata of the response from the runnable. 
+ """ + _, message = row + messages = ( + message["conversation_history"] if "conversation_history" in message else [] + ) + messages.append(message["human_message"]) + input_runnable = {"messages": messages} + response = runnable.invoke(input_runnable) + message["response"] = response.content + message["response_obj"] = response.usage_metadata + return message + + +def batch_generate_messages( + messages: pd.DataFrame, + runnable: Callable[[List[Dict[str, Any]]], Dict[str, Any]], + max_workers: int = 4, +) -> pd.DataFrame: + """Generates AI responses to user messages using a provided runnable. + + Processes a Pandas DataFrame containing conversation histories and user messages, utilizing + the specified runnable to predict AI responses in parallel. + + Args: + messages (pd.DataFrame): DataFrame with a 'messages' column. Each row + represents a conversation and contains a list of dictionaries, where + each dictionary + represents a message turn in the format: + + ```json + [ + {"type": "human", "content": "user's message"}, + {"type": "ai", "content": "AI's response"}, + {"type": "human", "content": "current user's message"}, + ... + ] + ``` + + runnable (Callable[[List[Dict[str, Any]]], Dict[str, Any]]): Runnable object + (e.g., LangChain Chain) used + for response generation. It should accept a list of message dictionaries + (as described above) and return a dictionary with the following structure: + + ```json + { + "response": "AI's response", + "response_obj": { ... } # optional response metadata + } + ``` + + max_workers (int, optional): Number of worker processes for parallel + prediction. Defaults to 4. + + Returns: + pd.DataFrame: DataFrame with the original 'messages' column and two new + columns: 'response' containing the predicted AI responses, and + 'response_obj' containing optional response metadata. 
+ + Example: + ```python + import pandas as pd + + messages_df = pd.DataFrame({ + "messages": [ + [ + {"type": "human", "content": "What's the weather today?"} + ], + [ + {"type": "human", "content": "Tell me a joke."}, + {"type": "ai", "content": "Why did the scarecrow win an award?"}, + {"type": "human", "content": "I don't know, why?"} + ] + ] + }) + + responses_df = batch_generate_messages(messages_df, my_runnable) + ``` + """ + logging.info("Executing batch scoring") + predicted_messages = [] + with ThreadPoolExecutor(max_workers) as pool: + partial_func = partial(generate_message, runnable=runnable) + for message in tqdm( + pool.map(partial_func, messages.iterrows()), total=len(messages) + ): + predicted_messages.append(message) + return pd.DataFrame(predicted_messages) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/chain.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/chain.py new file mode 100644 index 0000000000..1bdad8c9d4 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/chain.py @@ -0,0 +1,141 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# mypy: disable-error-code="arg-type,attr-defined" +# pylint: disable=W0613, W0622 + +import logging +from typing import Any, Dict, Iterator, List + +from app.patterns.custom_rag_qa.templates import ( + inspect_conversation_template, + rag_template, + template_docs, +) +from app.patterns.custom_rag_qa.vector_store import get_vector_store +from app.utils.output_types import OnChatModelStreamEvent, OnToolEndEvent, custom_chain +import google +from langchain.schema import Document +from langchain.tools import tool +from langchain_core.messages import ToolMessage +from langchain_google_community.vertex_rank import VertexAIRank +from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings +import vertexai + +# Configuration +EMBEDDING_MODEL = "text-embedding-004" +LLM_MODEL = "gemini-1.5-flash-002" +TOP_K = 5 + +# Initialize logging +logging.basicConfig(level=logging.INFO) + +# Initialize Google Cloud and Vertex AI +credentials, project_id = google.auth.default() +vertexai.init(project=project_id) + +# Set up embedding model and vector store +embedding = VertexAIEmbeddings(model_name=EMBEDDING_MODEL) +vector_store = get_vector_store(embedding=embedding) +retriever = vector_store.as_retriever(search_kwargs={"k": 20}) + +# Initialize document compressor +compressor = VertexAIRank( + project_id=project_id, + location_id="global", + ranking_config="default_ranking_config", + title_field="id", + top_n=TOP_K, +) + + +@tool +def retrieve_docs(query: str) -> List[Document]: + """ + Useful for retrieving relevant documents based on a query. + Use this when you need additional information to answer a question. + + Args: + query (str): The user's question or search query. + + Returns: + List[Document]: A list of the top-ranked Document objects, limited to TOP_K (5) results. 
+ """ + retrieved_docs = retriever.invoke(query) + ranked_docs = compressor.compress_documents(documents=retrieved_docs, query=query) + return ranked_docs + + +@tool +def should_continue() -> None: + """ + Use this tool if you determine that you have enough context to respond to the questions of the user. + """ + return None + + +# Initialize language model +llm = ChatVertexAI(model=LLM_MODEL, temperature=0, max_tokens=1024) + +# Set up conversation inspector +inspect_conversation = inspect_conversation_template | llm.bind_tools( + [retrieve_docs, should_continue], tool_choice="any" +) + +# Set up response chain +response_chain = rag_template | llm + + +@custom_chain +def chain( + input: Dict[str, Any], **kwargs: Any +) -> Iterator[OnToolEndEvent | OnChatModelStreamEvent]: + """ + Implement a RAG QA chain with tool calls. + + This function is decorated with `custom_chain` to offer LangChain compatible + astream_events, support for synchronous invocation through the `invoke` method, + and OpenTelemetry tracing. 
+ """ + # Inspect conversation and determine next action + inspection_result = inspect_conversation.invoke(input) + tool_call_result = inspection_result.tool_calls[0] + + # Execute the appropriate tool based on the inspection result + if tool_call_result["name"] == "retrieve_docs": + # Retrieve relevant documents + docs = retrieve_docs.invoke(tool_call_result["args"]) + # Format the retrieved documents + formatted_docs = template_docs.format(docs=docs) + # Create a ToolMessage with the formatted documents + tool_message = ToolMessage( + tool_call_id=tool_call_result["name"], + name=tool_call_result["name"], + content=formatted_docs, + artifact=docs, + ) + else: + # If no documents need to be retrieved, continue with the conversation + tool_message = should_continue.invoke(tool_call_result) + + # Update input messages with new information + input["messages"] = input["messages"] + [inspection_result, tool_message] + + # Yield tool results metadata + yield OnToolEndEvent( + data={"input": tool_call_result["args"], "output": tool_message} + ) + + # Stream LLM response + for chunk in response_chain.stream(input=input): + yield OnChatModelStreamEvent(data={"chunk": chunk}) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/templates.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/templates.py new file mode 100644 index 0000000000..75b11b2b5c --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/templates.py @@ -0,0 +1,56 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# flake8: noqa: W291 + +from langchain_core.prompts import ( + ChatPromptTemplate, + MessagesPlaceholder, + PromptTemplate, +) + +template_docs = PromptTemplate.from_template( + """## Context provided: +{% for doc in docs%} + +{{ doc.page_content | safe }} + +{% endfor %} +""", + template_format="jinja2", +) + +inspect_conversation_template = ChatPromptTemplate.from_messages( + [ + ( + "system", + """You are an AI assistant tasked with analyzing the conversation " +and determining the best course of action.""", + ), + MessagesPlaceholder(variable_name="messages"), + ] +) + +rag_template = ChatPromptTemplate.from_messages( + [ + ( + "system", + """You are an AI assistant for question-answering tasks. + +Answer to the best of your ability using the context provided. +If you're unsure, it's better to acknowledge limitations than to speculate. +""", + ), + MessagesPlaceholder(variable_name="messages"), + ] +) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/vector_store.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/vector_store.py new file mode 100644 index 0000000000..017d1383a1 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/custom_rag_qa/vector_store.py @@ -0,0 +1,53 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +from typing import List + +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_community.document_loaders import PyPDFLoader +from langchain_community.vectorstores import SKLearnVectorStore +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings + +PERSIST_PATH = ".persist_vector_store" +URL = "https://services.google.com/fh/files/misc/practitioners_guide_to_mlops_whitepaper.pdf" + + +def load_and_split_documents(url: str) -> List[Document]: + """Load and split documents from a given URL.""" + loader = PyPDFLoader(url) + documents = loader.load() + logging.info(f"# of documents loaded (pre-chunking) = {len(documents)}") + + text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) + doc_splits = text_splitter.split_documents(documents) + logging.info(f"# of documents after split = {len(doc_splits)}") + + return doc_splits + + +def get_vector_store( + embedding: Embeddings, persist_path: str = PERSIST_PATH, url: str = URL +) -> SKLearnVectorStore: + """Get or create a vector store.""" + vector_store = SKLearnVectorStore(embedding=embedding, persist_path=persist_path) + + if not os.path.exists(persist_path): + doc_splits = load_and_split_documents(url=url) + vector_store.add_documents(documents=doc_splits) + vector_store.persist() + + return vector_store diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/langgraph_dummy_agent/chain.py 
b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/langgraph_dummy_agent/chain.py new file mode 100644 index 0000000000..75a32bc0b9 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/patterns/langgraph_dummy_agent/chain.py @@ -0,0 +1,73 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# mypy: disable-error-code="unused-ignore, union-attr" + +from typing import Dict + +from langchain_core.messages import BaseMessage +from langchain_core.runnables import RunnableConfig +from langchain_core.tools import tool +from langchain_google_vertexai import ChatVertexAI +from langgraph.graph import END, MessagesState, StateGraph +from langgraph.prebuilt import ToolNode + + +# 1. Define tools +@tool +def search(query: str) -> str: + """Simulates a web search. Use it get information on weather""" + if "sf" in query.lower() or "san francisco" in query.lower(): + return "It's 60 degrees and foggy." + return "It's 90 degrees and sunny." + + +tools = [search] + +# 2. Set up the language model +llm = ChatVertexAI( + model="gemini-1.5-pro-002", temperature=0, max_tokens=1024, streaming=True +).bind_tools(tools) + + +# 3. 
Define workflow components +def should_continue(state: MessagesState) -> str: + """Determines whether to use tools or end the conversation.""" + last_message = state["messages"][-1] + return "tools" if last_message.tool_calls else END + + +async def call_model( + state: MessagesState, config: RunnableConfig +) -> Dict[str, BaseMessage]: + """Calls the language model and returns the response.""" + system_message = "You are a helpful AI assistant." + messages_with_system = [{"type": "system", "content": system_message}] + state[ + "messages" + ] + response = llm.invoke(messages_with_system, config) + return {"messages": response} + + +# 4. Create the workflow graph +workflow = StateGraph(MessagesState) +workflow.add_node("agent", call_model) +workflow.add_node("tools", ToolNode(tools)) +workflow.set_entry_point("agent") + +# 5. Define graph edges +workflow.add_conditional_edges("agent", should_continue) +workflow.add_edge("tools", "agent") + +# 6. Compile the workflow +chain = workflow.compile() diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/server.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/server.py new file mode 100644 index 0000000000..c9a8449746 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/server.py @@ -0,0 +1,121 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=W0718, C0411 +# ruff: noqa: I001 + +import json +import logging +import os +from typing import AsyncGenerator +import uuid + +from app.chain import chain +from app.utils.input_types import Feedback, Input, InputChat, default_serialization +from app.utils.output_types import EndEvent, Event +from app.utils.tracing import CloudTraceLoggingSpanExporter +from fastapi import FastAPI +from fastapi.responses import RedirectResponse, StreamingResponse +from google.cloud import logging as google_cloud_logging +from traceloop.sdk import Instruments, Traceloop + +# Default chain +# from app.chain import chain + +# Or choose one of the following pattern chains to test by uncommenting it: + +# Custom RAG QA +# from app.patterns.custom_rag_qa.chain import chain + +# LangGraph dummy agent +# from app.patterns.langgraph_dummy_agent.chain import chain + +# The events that are supported by the UI Frontend +SUPPORTED_EVENTS = [ + "on_tool_start", + "on_tool_end", + "on_retriever_start", + "on_retriever_end", + "on_chat_model_stream", +] + +# Initialize FastAPI app and logging +app = FastAPI() +logging_client = google_cloud_logging.Client() +logger = logging_client.logger(__name__) + +# Initialize Traceloop +try: + Traceloop.init( + app_name="Sample Chatbot Application", + disable_batch=False, + exporter=CloudTraceLoggingSpanExporter(), + instruments={Instruments.VERTEXAI, Instruments.LANGCHAIN}, + ) +except Exception as e: + logging.error("Failed to initialize Traceloop: %s", e) + + +async def stream_event_response(input_chat: InputChat) -> AsyncGenerator[str, None]: + """Stream events in response to an input chat.""" + run_id = uuid.uuid4() + input_dict = input_chat.model_dump() + + Traceloop.set_association_properties( + { + "log_type": "tracing", + "run_id": str(run_id), + "user_id": input_dict["user_id"], + "session_id": input_dict["session_id"], + "commit_sha": os.environ.get("COMMIT_SHA", "None"), + } + ) + + yield json.dumps( + Event(event="metadata", 
data={"run_id": str(run_id)}), + default=default_serialization, + ) + "\n" + + async for data in chain.astream_events(input_dict, version="v2"): + if data["event"] in SUPPORTED_EVENTS: + yield json.dumps(data, default=default_serialization) + "\n" + + yield json.dumps(EndEvent(), default=default_serialization) + "\n" + + +# Routes +@app.get("/") +async def redirect_root_to_docs() -> RedirectResponse: + """Redirect the root URL to the API documentation.""" + return RedirectResponse("/docs") + + +@app.post("/feedback") +async def collect_feedback(feedback_dict: Feedback) -> None: + """Collect and log feedback.""" + logger.log_struct(feedback_dict.model_dump(), severity="INFO") + + +@app.post("/stream_events") +async def stream_chat_events(request: Input) -> StreamingResponse: + """Stream chat events in response to an input request.""" + return StreamingResponse( + stream_event_response(input_chat=request.input), media_type="text/event-stream" + ) + + +# Main execution +if __name__ == "__main__": + import uvicorn + + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/__init__.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/input_types.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/input_types.py new file mode 100644 index 0000000000..49be86317c --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/input_types.py @@ -0,0 +1,56 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Annotated, Any, List, Literal, Optional, Union + +from langchain_core.messages import AIMessage, HumanMessage, ToolMessage +from pydantic import BaseModel, Field + + +class InputChat(BaseModel): + """Represents the input for a chat session.""" + + messages: List[ + Annotated[ + Union[HumanMessage, AIMessage, ToolMessage], Field(discriminator="type") + ] + ] = Field( + ..., description="The chat messages representing the current conversation." + ) + user_id: str = "" + session_id: str = "" + + +class Input(BaseModel): + """Wrapper class for InputChat.""" + + input: InputChat + + +class Feedback(BaseModel): + """Represents feedback for a conversation.""" + + score: Union[int, float] + text: Optional[str] = "" + run_id: str + log_type: Literal["feedback"] = "feedback" + + +def default_serialization(obj: Any) -> Any: + """ + Default serialization for LangChain objects. + Converts BaseModel instances to dictionaries. + """ + if isinstance(obj, BaseModel): + return obj.model_dump() diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/output_types.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/output_types.py new file mode 100644 index 0000000000..f10f0a858b --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/output_types.py @@ -0,0 +1,142 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import wraps +from types import GeneratorType +from typing import Any, AsyncGenerator, Callable, Dict, Literal +import uuid + +from langchain_core.messages import AIMessage, AIMessageChunk, ToolMessage +from pydantic import BaseModel, Field +from traceloop.sdk import TracerWrapper +from traceloop.sdk.decorators import workflow + + +class BaseCustomChainEvent(BaseModel): + """Base class for custom chain events.""" + + name: str = "custom_chain_event" + + class Config: + """Allow extra fields in the model.""" + + extra = "allow" + + +class OnToolStartEvent(BaseCustomChainEvent): + """Event representing the start of a tool execution.""" + + event: Literal["on_tool_start"] = "on_tool_start" + input: Dict = {} + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + + +class ToolData(BaseModel): + """Data structure for tool input and output.""" + + input: Dict = {} + output: ToolMessage + + +class OnToolEndEvent(BaseCustomChainEvent): + """Event representing the end of a tool execution.""" + + event: Literal["on_tool_end"] = "on_tool_end" + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + data: ToolData + + +class ChatModelStreamData(BaseModel): + """Data structure for chat model stream chunks.""" + + chunk: AIMessageChunk + + +class OnChatModelStreamEvent(BaseCustomChainEvent): + """Event representing a chunk of streamed chat model output.""" + + event: Literal["on_chat_model_stream"] = "on_chat_model_stream" + data: ChatModelStreamData + + +class Event(BaseModel): + """Generic event structure.""" + + event: str = "data" + 
data: dict + + +class EndEvent(BaseModel): + """Event representing the end of a stream.""" + + event: Literal["end"] = "end" + + +class CustomChain: + """A custom chain class that wraps a callable function.""" + + def __init__(self, func: Callable): + """Initialize the CustomChain with a callable function.""" + self.func = func + + async def astream_events(self, *args: Any, **kwargs: Any) -> AsyncGenerator: + """ + Asynchronously stream events from the wrapped function. + Applies Traceloop workflow decorator if Traceloop SDK is initialized. + """ + + if hasattr(TracerWrapper, "instance"): + func = workflow()(self.func) + else: + func = self.func + + gen: GeneratorType = func(*args, **kwargs) + + for event in gen: + yield event.model_dump() + + def invoke(self, *args: Any, **kwargs: Any) -> AIMessage: + """ + Invoke the wrapped function and process its events. + Returns an AIMessage with content and relative tool calls. + """ + events = self.func(*args, **kwargs) + response_content = "" + tool_calls = [] + for event in events: + if isinstance(event, OnChatModelStreamEvent): + if not isinstance(event.data.chunk.content, str): + raise ValueError("Chunk content must be a string") + response_content += event.data.chunk.content + elif isinstance(event, OnToolEndEvent): + tool_calls.append(event.data.model_dump()) + return AIMessage( + content=response_content, additional_kwargs={"tool_calls_data": tool_calls} + ) + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + """Make the CustomChain instance callable, invoking the wrapped function.""" + return self.func(*args, **kwargs) + + +def custom_chain(func: Callable) -> CustomChain: + """ + Decorator function that wraps a callable in a CustomChain instance. 
+ """ + + @wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Any: + return func(*args, **kwargs) + + return CustomChain(wrapper) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/tracing.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/tracing.py new file mode 100644 index 0000000000..0f5ca84b80 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/app/utils/tracing.py @@ -0,0 +1,150 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +from typing import Any, Optional, Sequence + +from google.cloud import logging as google_cloud_logging +from google.cloud import storage +from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export import SpanExportResult + + +class CloudTraceLoggingSpanExporter(CloudTraceSpanExporter): + """ + An extended version of CloudTraceSpanExporter that logs span data to Google Cloud Logging + and handles large attribute values by storing them in Google Cloud Storage. + + This class helps bypass the 256 character limit of Cloud Trace for attribute values + by leveraging Cloud Logging (which has a 256KB limit) and Cloud Storage for larger payloads. 
+ """ + + def __init__( + self, + logging_client: Optional[google_cloud_logging.Client] = None, + storage_client: Optional[storage.Client] = None, + bucket_name: Optional[str] = None, + debug: bool = False, + **kwargs: Any, + ) -> None: + """ + Initialize the exporter with Google Cloud clients and configuration. + + :param logging_client: Google Cloud Logging client + :param storage_client: Google Cloud Storage client + :param bucket_name: Name of the GCS bucket to store large payloads + :param debug: Enable debug mode for additional logging + :param kwargs: Additional arguments to pass to the parent class + """ + super().__init__(**kwargs) + self.debug = debug + self.logging_client = logging_client or google_cloud_logging.Client( + project=self.project_id + ) + self.logger = self.logging_client.logger(__name__) + self.storage_client = storage_client or storage.Client(project=self.project_id) + self.bucket_name = bucket_name or f"{self.project_id}-logs-data" + self._ensure_bucket_exists() + self.bucket = self.storage_client.bucket(self.bucket_name) + + def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: + """ + Export the spans to Google Cloud Logging and Cloud Trace. 
+ + :param spans: A sequence of spans to export + :return: The result of the export operation + """ + for span in spans: + span_context = span.get_span_context() + trace_id = format(span_context.trace_id, "x") + span_id = format(span_context.span_id, "x") + span_dict = json.loads(span.to_json()) + + span_dict["trace"] = f"projects/{self.project_id}/traces/{trace_id}" + span_dict["span_id"] = span_id + + span_dict = self._process_large_attributes( + span_dict=span_dict, span_id=span_id + ) + + if self.debug: + print(span_dict) + + # Log the span data to Google Cloud Logging + self.logger.log_struct(span_dict, severity="INFO") + + # Export spans to Google Cloud Trace using the parent class method + return super().export(spans) + + def _ensure_bucket_exists(self) -> None: + """Ensure that the GCS bucket exists, creating it if necessary.""" + if not self.storage_client.bucket(self.bucket_name).exists(): + logging.info(f"Bucket {self.bucket_name} not detected. Creating it now.") + self.storage_client.create_bucket(self.bucket_name) + + def store_in_gcs(self, content: str, span_id: str) -> str: + """ + Initiate storing large content in Google Cloud Storage/ + + :param content: The content to store + :param span_id: The ID of the span + :return: The GCS URI of the stored content + """ + blob_name = f"spans/{span_id}.json" + blob = self.bucket.blob(blob_name) + + blob.upload_from_string(content, "application/json") + return f"gs://{self.bucket_name}/{blob_name}" + + def _process_large_attributes(self, span_dict: dict, span_id: str) -> dict: + """ + Process large attribute values by storing them in GCS if they exceed the size + limit of Google Cloud Logging. 
+ + :param span_dict: The span data dictionary + :param trace_id: The trace ID + :param span_id: The span ID + :return: The updated span dictionary + """ + attributes = span_dict["attributes"] + if len(json.dumps(attributes).encode()) > 255 * 1024: # 250 KB + # Separate large payload from other attributes + attributes_payload = { + k: v + for k, v in attributes.items() + if "traceloop.association.properties" not in k + } + attributes_retain = { + k: v + for k, v in attributes.items() + if "traceloop.association.properties" in k + } + + # Store large payload in GCS + gcs_uri = self.store_in_gcs(json.dumps(attributes_payload), span_id) + attributes_retain["uri_payload"] = gcs_uri + attributes_retain["url_payload"] = ( + f"https://storage.mtls.cloud.google.com/" + f"{self.bucket_name}/spans/{span_id}.json" + ) + + span_dict["attributes"] = attributes_retain + logging.info( + "Length of payload span above 250 KB, storing attributes in GCS " + "to avoid large log entry errors" + ) + + return span_dict diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/README.md b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/README.md new file mode 100644 index 0000000000..6be94bb79e --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/README.md @@ -0,0 +1,116 @@ +# Deployment README.md + +This folder contains the infrastructure-as-code and CI/CD pipeline configurations for deploying a conversational Generative AI application on Google Cloud. + +The application leverages [**Terraform**](http://terraform.io) to define and provision the underlying infrastructure, while [**Cloud Build**](https://cloud.google.com/build/) orchestrates the continuous integration and continuous deployment (CI/CD) pipeline. + +## Deployment Workflow + +![Deployment Workflow](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/deployment_workflow.png) + +**Description:** + +1. 
CI Pipeline (`deployment/ci/pr_checks.yaml`): + + - Triggered on pull request creation/update + - Runs unit and integration tests + +2. CD Pipeline (`deployment/cd/staging.yaml`): + + - Triggered on merge to `main` branch + - Builds and pushes application to Artifact Registry + - Deploys to staging environment (Cloud Run) + - Performs load testing + +3. Production Deployment (`deployment/cd/deploy-to-prod.yaml`): + - Triggered after successful staging deployment + - Requires manual approval + - Deploys to production environment + +## Setup + +**Prerequisites:** + +1. A set of Google Cloud projects: + - Staging project + - Production project + - CI/CD project (can be the same as staging or production) +2. Terraform installed on your local machine +3. Enable required APIs in the CI/CD project. This will be required for the Terraform deployment: + + ```bash + gcloud config set project YOUR_CI_CD_PROJECT_ID + gcloud services enable serviceusage.googleapis.com cloudresourcemanager.googleapis.com cloudbuild.googleapis.com secretmanager.googleapis.com + ``` + +## Step-by-Step Guide + +1. **Create a Git Repository using your favorite Git provider (GitHub, GitLab, Bitbucket, etc.)** + +2. **Connect Your Repository to Cloud Build** + For detailed instructions, visit: [Cloud Build Repository Setup](https://cloud.google.com/build/docs/repositories#whats_next).
    + + ![Alt text](https://storage.googleapis.com/github-repo/generative-ai/sample-apps/e2e-gen-ai-app-starter-pack/connection_cb.gif) + +3. **Configure Terraform Variables** + + - Edit [`deployment/terraform/vars/env.tfvars`](../terraform/vars/env.tfvars) with your Google Cloud settings. + + | Variable | Description | Required | + | ---------------------- | --------------------------------------------------------------- | :------: | + | prod_project_id | **Production** Google Cloud Project ID for resource deployment. | Yes | + | staging_project_id | **Staging** Google Cloud Project ID for resource deployment. | Yes | + | cicd_runner_project_id | Google Cloud Project ID where CI/CD pipelines will execute. | Yes | + | region | Google Cloud region for resource deployment. | Yes | + | host_connection_name | Name of the host connection you created in Cloud Build | Yes | + | repository_name | Name of the repository you added to Cloud Build | Yes | + + Other optional variables include: telemetry and feedback BigQuery dataset IDs, log filters, sink names, service account names, bucket name suffixes, artifact registry repository name, and various role assignments for Cloud Run and CICD. + +4. **Deploy Infrastructure with Terraform** + + - Open a terminal and navigate to the Terraform directory: + + ```bash + cd deployment/terraform + ``` + + - Initialize Terraform: + + ```bash + terraform init + ``` + + - Apply the Terraform configuration: + + ```bash + terraform apply --var-file vars/env.tfvars + ``` + + - Type 'yes' when prompted to confirm + +After completing these steps, your infrastructure will be set up and ready for deployment! + +## Dev Deployment + +For End-to-end testing of the application, including tracing and feedback sinking to BigQuery, without the need to trigger a CI/CD pipeline. 
+ +After you edited the relative [`env.tfvars` file](../terraform/dev/vars/env.tfvars), follow the following instructions: + +```bash +cd deployment/terraform/dev +terraform init +terraform apply --var-file vars/env.tfvars +``` + +Then deploy the application using the following command (from the root of the repository): + +```bash +gcloud run deploy genai-app-sample --source . --project $YOUR_DEV_PROJECT_ID --service-account genai-app-sample-cr-sa@$YOUR_DEV_PROJECT_ID.iam.gserviceaccount.com +``` + +### End-to-end Demo video + + + Watch the video + diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/deploy-to-prod.yaml b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/deploy-to-prod.yaml new file mode 100644 index 0000000000..896de2eba8 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/deploy-to-prod.yaml @@ -0,0 +1,35 @@ +steps: + - name: "gcr.io/cloud-builders/gcloud" + id: trigger-deployment + entrypoint: gcloud + args: + - "run" + - "deploy" + - "genai-app-sample" + - "--image" + - "us-central1-docker.pkg.dev/$PROJECT_ID/$_ARTIFACT_REGISTRY_REPO_NAME/$_CONTAINER_NAME" + - "--region" + - "us-central1" + - "--project" + - $_PROD_PROJECT_ID + - "--min-instances" + - "1" + - "--no-cpu-throttling" + - "--cpu" + - "4" + - "--memory" + - "4Gi" + - "--concurrency" + - "40" + - "--service-account" + - "${_CLOUD_RUN_APP_SA_NAME}@${_PROD_PROJECT_ID}.iam.gserviceaccount.com" + - "--set-env-vars" + - "COMMIT_SHA=${COMMIT_SHA}" + +substitutions: + _PROD_PROJECT_ID: YOUR_PROD_PROJECT_ID # Replace with your prod project ID + _CONTAINER_NAME: genai-app-sample + _ARTIFACT_REGISTRY_REPO_NAME: genai-containers + _CLOUD_RUN_APP_SA_NAME: genai-app-sample-cr-sa +options: + logging: CLOUD_LOGGING_ONLY diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/staging.yaml b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/staging.yaml new file mode 100644 index 0000000000..ff8eac0656 --- 
/dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/cd/staging.yaml @@ -0,0 +1,137 @@ +steps: + # # Build and Push + - name: "gcr.io/cloud-builders/docker" + args: + [ + "build", + "-t", + "us-central1-docker.pkg.dev/$PROJECT_ID/$_ARTIFACT_REGISTRY_REPO_NAME/$_CONTAINER_NAME", + ".", + ] + - name: "gcr.io/cloud-builders/docker" + args: + [ + "push", + "us-central1-docker.pkg.dev/$PROJECT_ID/$_ARTIFACT_REGISTRY_REPO_NAME/$_CONTAINER_NAME", + ] + + # Deploy to Staging + - name: "gcr.io/cloud-builders/gcloud" + id: deploy-staging + entrypoint: gcloud + args: + - "run" + - "deploy" + - "genai-app-sample" + - "--image" + - "us-central1-docker.pkg.dev/$PROJECT_ID/$_ARTIFACT_REGISTRY_REPO_NAME/$_CONTAINER_NAME" + - "--region" + - "us-central1" + - "--project" + - "${_STAGING_PROJECT_ID}" + - "--min-instances" + - "1" + - "--no-cpu-throttling" + - "--cpu" + - "4" + - "--memory" + - "4Gi" + - "--concurrency" + - "40" + - "--service-account" + - "${_CLOUD_RUN_APP_SA_NAME}@${_STAGING_PROJECT_ID}.iam.gserviceaccount.com" + - "--set-env-vars" + - "COMMIT_SHA=${COMMIT_SHA}" + + # Fetch Staging Service URL + - name: "gcr.io/cloud-builders/gcloud" + id: fetch-staging-url + entrypoint: /bin/bash + args: + - "-c" + - | + echo $(gcloud run services describe genai-app-sample \ + --region us-central1 --project ${_STAGING_PROJECT_ID} --format="value(status.url)") > staging_url.txt + + # Fetch ID Token + - name: gcr.io/cloud-builders/gcloud + id: fetch-id-token + entrypoint: /bin/bash + args: + - "-c" + - | + echo $(gcloud auth print-identity-token -q) > id_token.txt + + # Load Testing + - name: "python:3.10" + id: load_test + entrypoint: /bin/bash + args: + - "-c" + - | + export _ID_TOKEN=$(cat id_token.txt) + export _STAGING_URL=$(cat staging_url.txt) + pip install locust==2.31.1 + python -m locust -f tests/load_test/load_test.py \ + -H $$_STAGING_URL \ + --headless \ + -t 30s -u 10 -r 0.5 \ + --csv=tests/load_test/.results/results \ + 
--html=tests/load_test/.results/report.html + + # Export Load Test Results to GCS + - name: gcr.io/cloud-builders/gcloud + id: export-results-to-gcs + entrypoint: /bin/bash + args: + - "-c" + - | + export _TIMESTAMP=$(date +%Y%m%d-%H%M%S) + gsutil -m cp -r tests/load_test/.results gs://${_BUCKET_NAME_LOAD_TEST_RESULTS}/results-$${_TIMESTAMP} + echo "_________________________________________________________________________" + echo "Load test results copied to gs://${_BUCKET_NAME_LOAD_TEST_RESULTS}/results-$${_TIMESTAMP}" + echo "HTTP link: https://console.cloud.google.com/storage/browser/${_BUCKET_NAME_LOAD_TEST_RESULTS}/results-$${_TIMESTAMP}" + echo "_________________________________________________________________________" + + # Trigger Prod Deployment + - name: gcr.io/cloud-builders/gcloud + id: trigger-prod-deployment + entrypoint: gcloud + args: + - "beta" + - "builds" + - "triggers" + - "run" + - "deploy-to-prod-pipeline" + - "--region" + - "$LOCATION" + - "--project" + - "$PROJECT_ID" + - "--substitutions" + - "_PROD_PROJECT_ID=${_PROD_PROJECT_ID},_ARTIFACT_REGISTRY_REPO_NAME=${_ARTIFACT_REGISTRY_REPO_NAME},_CONTAINER_NAME=${_CONTAINER_NAME},_CLOUD_RUN_APP_SA_NAME=${_CLOUD_RUN_APP_SA_NAME}" + - "--sha" + - $COMMIT_SHA + + - name: gcr.io/cloud-builders/gcloud + id: echo-view-build-trigger-link + entrypoint: /bin/bash + args: + - "-c" + - | + echo "_________________________________________________________________________" + echo "Production deployment triggered. 
View progress and / or approve on the Cloud Build Console:" + echo "https://console.cloud.google.com/cloud-build/builds;region=$LOCATION" + echo "_________________________________________________________________________" + +substitutions: + _STAGING_PROJECT_ID: YOUR_STAGING_PROJECT_ID # Replace with your staging project ID + _PROD_PROJECT_ID: YOUR_PROD_PROJECT_ID # Replace with your prod project ID + _BUCKET_NAME_LOAD_TEST_RESULTS: YOUR_CICD_PROJECT_ID-cicd-load-test-results # Replace with your GCS bucket name + _CONTAINER_NAME: genai-app-sample + _ARTIFACT_REGISTRY_REPO_NAME: genai-containers + _CLOUD_RUN_APP_SA_NAME: genai-app-sample-cr-sa + +options: + substitutionOption: ALLOW_LOOSE + defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET +serviceAccount: projects/${PROJECT_ID}/serviceAccounts/cicd-runner@${PROJECT_ID}.iam.gserviceaccount.com diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/ci/pr_checks.yaml b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/ci/pr_checks.yaml new file mode 100644 index 0000000000..c018170ec2 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/ci/pr_checks.yaml @@ -0,0 +1,27 @@ +steps: + - name: "python:3.10" + id: install-dependencies + entrypoint: /bin/bash + args: + - "-c" + - | + pip install poetry==1.8.3 --user && python -m poetry install + + - name: "python:3.10" + id: unit-tests + entrypoint: /bin/bash + args: + - "-c" + - | + python -m poetry run pytest tests/unit + + # Run integration tests + - name: "python:3.10" + id: integration-tests + entrypoint: /bin/bash + args: + - "-c" + - | + python -m poetry run pytest tests/integration +options: + logging: CLOUD_LOGGING_ONLY diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/apis.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/apis.tf new file mode 100644 index 0000000000..11e3d9f1df --- /dev/null +++ 
b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/apis.tf @@ -0,0 +1,49 @@ +locals { + cicd_services = [ + "cloudbuild.googleapis.com", + "discoveryengine.googleapis.com", + "aiplatform.googleapis.com", + "serviceusage.googleapis.com", + "bigquery.googleapis.com", + "cloudresourcemanager.googleapis.com", + "cloudtrace.googleapis.com" + ] + + shared_services = [ + "aiplatform.googleapis.com", + "run.googleapis.com", + "discoveryengine.googleapis.com", + "cloudresourcemanager.googleapis.com", + "iam.googleapis.com", + "bigquery.googleapis.com", + "serviceusage.googleapis.com", + "logging.googleapis.com", + "cloudtrace.googleapis.com" + ] + + projects = { + prod = var.prod_project_id + staging = var.staging_project_id + } + +} + +resource "google_project_service" "cicd_services" { + count = length(local.cicd_services) + project = var.cicd_runner_project_id + service = local.cicd_services[count.index] + disable_on_destroy = false +} + +resource "google_project_service" "shared_services" { + for_each = { + for pair in setproduct(keys(local.projects), local.shared_services) : + "${pair[0]}_${replace(pair[1], ".", "_")}" => { + project = local.projects[pair[0]] + service = pair[1] + } + } + project = each.value.project + service = each.value.service + disable_on_destroy = false +} \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/artifact_registry.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/artifact_registry.tf new file mode 100644 index 0000000000..239b65e021 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/artifact_registry.tf @@ -0,0 +1,8 @@ +resource "google_artifact_registry_repository" "my-repo" { + location = "us-central1" + repository_id = var.artifact_registry_repo_name + description = "Repo for Generative AI applications" + format = "DOCKER" + project = var.cicd_runner_project_id + depends_on = 
[resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/build_triggers.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/build_triggers.tf new file mode 100644 index 0000000000..a0fc872c67 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/build_triggers.tf @@ -0,0 +1,80 @@ +# a. Create PR checks trigger +resource "google_cloudbuild_trigger" "pr_checks" { + name = "pr-checks" + project = var.cicd_runner_project_id + location = var.region + description = "Trigger for PR checks" + service_account = resource.google_service_account.cicd_runner_sa.id + + repository_event_config { + repository = "projects/${var.cicd_runner_project_id}/locations/${var.region}/connections/${var.host_connection_name}/repositories/${var.repository_name}" + pull_request { + branch = "main" + } + } + + filename = "deployment/ci/pr_checks.yaml" + included_files = [ + "app/**", + "tests/**", + "deployment/**", + "poetry.lock" + ] + + include_build_logs = "INCLUDE_BUILD_LOGS_WITH_STATUS" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} + +# b. 
Create CD pipeline trigger +resource "google_cloudbuild_trigger" "cd_pipeline" { + name = "cd-pipeline" + project = var.cicd_runner_project_id + location = var.region + service_account = resource.google_service_account.cicd_runner_sa.id + description = "Trigger for CD pipeline" + + repository_event_config { + repository = "projects/${var.cicd_runner_project_id}/locations/${var.region}/connections/${var.host_connection_name}/repositories/${var.repository_name}" + push { + branch = "main" + } + } + + filename = "deployment/cd/staging.yaml" + included_files = [ + "app/**", + "tests/**", + "deployment/**", + "poetry.lock" + ] + substitutions = { + _STAGING_PROJECT_ID = var.staging_project_id + _PROD_PROJECT_ID = var.prod_project_id + _BUCKET_NAME_LOAD_TEST_RESULTS = resource.google_storage_bucket.bucket_load_test_results.name + _ARTIFACT_REGISTRY_REPO_NAME = var.artifact_registry_repo_name + _CLOUD_RUN_APP_SA_NAME = var.cloud_run_app_sa_name + } + + include_build_logs = "INCLUDE_BUILD_LOGS_WITH_STATUS" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] + +} + +# c. 
Create Deploy to production trigger +resource "google_cloudbuild_trigger" "deploy_to_prod_pipeline" { + name = "deploy-to-prod-pipeline" + project = var.cicd_runner_project_id + location = var.region + description = "Trigger for deployment to production" + service_account = resource.google_service_account.cicd_runner_sa.id + repository_event_config { + repository = "projects/${var.cicd_runner_project_id}/locations/${var.region}/connections/${var.host_connection_name}/repositories/${var.repository_name}" + } + filename = "deployment/cd/deploy-to-prod.yaml" + include_build_logs = "INCLUDE_BUILD_LOGS_WITH_STATUS" + approval_config { + approval_required = true + } + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] + +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/iam.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/iam.tf new file mode 100644 index 0000000000..a498653328 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/iam.tf @@ -0,0 +1,20 @@ +locals { + project_ids = { + dev = var.dev_project_id + } +} + +# 4. 
Grant Cloud Run SA the required permissions to run the application +resource "google_project_iam_member" "cloud_run_app_sa_roles" { + for_each = { + for pair in setproduct(keys(local.project_ids), var.cloud_run_app_roles) : + join(",", pair) => { + project = local.project_ids[pair[0]] + role = pair[1] + } + } + + project = each.value.project + role = each.value.role + member = "serviceAccount:${google_service_account.cloud_run_app_sa.email}" +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf new file mode 100644 index 0000000000..80fa73e42d --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf @@ -0,0 +1,49 @@ +resource "google_project_iam_member" "bigquery_data_editor" { + + project = var.dev_project_id + role = "roles/bigquery.dataEditor" + member = module.log_export_to_bigquery.writer_identity +} + + +module "log_export_to_bigquery" { + + source = "terraform-google-modules/log-export/google" + version = "8.1.0" + + log_sink_name = var.telemetry_sink_name + parent_resource_type = "project" + parent_resource_id = var.dev_project_id + destination_uri = "bigquery.googleapis.com/projects/${var.dev_project_id}/datasets/${var.telemetry_bigquery_dataset_id}" + filter = var.telemetry_logs_filter + bigquery_options = { use_partitioned_tables = true } + unique_writer_identity = true + +} + +resource "google_bigquery_dataset" "feedback_dataset" { + project = var.dev_project_id + dataset_id = var.feedback_bigquery_dataset_id + friendly_name = var.feedback_bigquery_dataset_id + location = var.region + +} + +module "feedback_export_to_bigquery" { + source = "terraform-google-modules/log-export/google" + version = "8.1.0" + log_sink_name = var.feedback_sink_name + parent_resource_type = "project" + parent_resource_id = var.dev_project_id + destination_uri = 
"bigquery.googleapis.com/projects/${var.dev_project_id}/datasets/${var.feedback_bigquery_dataset_id}" + filter = var.feedback_logs_filter + bigquery_options = { use_partitioned_tables = true } + unique_writer_identity = true +} + +resource "google_bigquery_dataset" "telemetry_logs_dataset" { + project = var.dev_project_id + dataset_id = var.telemetry_bigquery_dataset_id + friendly_name = var.telemetry_bigquery_dataset_id + location = var.region +} \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/service_accounts.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/service_accounts.tf new file mode 100644 index 0000000000..935bd815f5 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/service_accounts.tf @@ -0,0 +1,5 @@ +resource "google_service_account" "cloud_run_app_sa" { + account_id = var.cloud_run_app_sa_name + display_name = "Cloud Run Generative AI app SA" + project = var.dev_project_id +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf new file mode 100644 index 0000000000..20d7a1ecce --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf @@ -0,0 +1,29 @@ +terraform { + required_version = ">= 1.0.0" + required_providers { + google = { + source = "hashicorp/google" + version = ">= 3.53.0, < 6.0.0" + } + } +} + +resource "google_storage_bucket" "logs_data_bucket" { + name = "${var.dev_project_id}-logs-data" + location = var.region + project = var.dev_project_id + uniform_bucket_level_access = true + + lifecycle { + prevent_destroy = true + ignore_changes = all + } + + # Use this block to create the bucket only if it doesn't exist + count = length(data.google_storage_bucket.existing_bucket) > 0 ? 
0 : 1 +} + + +data "google_storage_bucket" "existing_bucket" { + name = "${var.dev_project_id}-logs-data" +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/variables.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/variables.tf new file mode 100644 index 0000000000..eddb5d9f60 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/variables.tf @@ -0,0 +1,64 @@ +variable "dev_project_id" { + type = string + description = "**Dev** Google Cloud Project ID for resource deployment." +} + +variable "region" { + type = string + description = "Google Cloud region for resource deployment." + default = "us-central1" +} + +variable "telemetry_bigquery_dataset_id" { + type = string + description = "BigQuery dataset ID for telemetry data export." + default = "telemetry_genai_app_sample_sink" +} + +variable "feedback_bigquery_dataset_id" { + type = string + description = "BigQuery dataset ID for feedback data export." + default = "feedback_genai_app_sample_sink" +} + +variable "telemetry_logs_filter" { + type = string + description = "Log Sink filter for capturing telemetry data. Captures logs with the `traceloop.association.properties.log_type` attribute set to `tracing`." + default = "jsonPayload.attributes.\"traceloop.association.properties.log_type\"=\"tracing\" jsonPayload.resource.attributes.\"service.name\"=\"Sample Chatbot Application\"" +} + +variable "feedback_logs_filter" { + type = string + description = "Log Sink filter for capturing feedback data. Captures logs where the `log_type` field is `feedback`." + default = "jsonPayload.log_type=\"feedback\"" +} + +variable "telemetry_sink_name" { + type = string + description = "Name of the telemetry data Log Sink." + default = "telemetry_logs_genai_app_sample" +} + +variable "feedback_sink_name" { + type = string + description = "Name of the feedback data Log Sink." 
+ default = "feedback_logs_genai_app_sample" +} + +variable "cloud_run_app_sa_name" { + description = "Service account name to be used for the Cloud Run service" + type = string + default = "genai-app-sample-cr-sa" +} + +variable "cloud_run_app_roles" { + description = "List of roles to assign to the Cloud Run app service account" + type = list(string) + default = [ + "roles/aiplatform.user", + "roles/discoveryengine.editor", + "roles/logging.logWriter", + "roles/cloudtrace.agent", + "roles/storage.admin" + ] +} \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/vars/env.tfvars b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/vars/env.tfvars new file mode 100644 index 0000000000..4e75cc2b36 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/vars/env.tfvars @@ -0,0 +1,16 @@ +# Your Dev Google Cloud project id +dev_project_id = "your-dev-project-id" + +# The Google Cloud region you will use to deploy the infrastructure +region = "us-central1" + +telemetry_bigquery_dataset_id = "telemetry_genai_app_sample_sink" +telemetry_sink_name = "telemetry_logs_genai_app_sample" +telemetry_logs_filter = "jsonPayload.attributes.\"traceloop.association.properties.log_type\"=\"tracing\" jsonPayload.resource.attributes.\"service.name\"=\"Sample Chatbot Application\"" + +feedback_bigquery_dataset_id = "feedback_genai_app_sample_sink" +feedback_sink_name = "feedback_logs_genai_app_sample" +feedback_logs_filter = "jsonPayload.log_type=\"feedback\"" + +cloud_run_app_sa_name = "genai-app-sample-cr-sa" + diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/iam.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/iam.tf new file mode 100644 index 0000000000..ddd3587294 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/iam.tf @@ -0,0 +1,81 @@ +locals { + project_ids = { + prod = 
var.prod_project_id + staging = var.staging_project_id + } +} + +# Data source to get project numbers +data "google_project" "projects" { + for_each = local.project_ids + project_id = each.value +} + +# 1. Assign roles for the CICD project +resource "google_project_iam_member" "cicd_project_roles" { + for_each = toset(var.cicd_roles) + + project = var.cicd_runner_project_id + role = each.value + member = "serviceAccount:${resource.google_service_account.cicd_runner_sa.email}" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] + +} + +# 2. Assign roles for the other two projects (prod and staging) +resource "google_project_iam_member" "other_projects_roles" { + for_each = { + for pair in setproduct(keys(local.project_ids), var.cicd_sa_deployment_required_roles) : + "${pair[0]}-${pair[1]}" => { + project_id = local.project_ids[pair[0]] + role = pair[1] + } + } + + project = each.value.project_id + role = each.value.role + member = "serviceAccount:${resource.google_service_account.cicd_runner_sa.email}" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} + +# 3. Allow Cloud Run service SA to pull containers stored in the CICD project +resource "google_project_iam_member" "cicd_run_invoker_artifact_registry_reader" { + for_each = local.project_ids + project = var.cicd_runner_project_id + + role = "roles/artifactregistry.reader" + member = "serviceAccount:service-${data.google_project.projects[each.key].number}@serverless-robot-prod.iam.gserviceaccount.com" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] + +} + +# 4. 
Grant Cloud Run SA the required permissions to run the application +resource "google_project_iam_member" "cloud_run_app_sa_roles" { + for_each = { + for pair in setproduct(keys(local.project_ids), var.cloud_run_app_roles) : + join(",", pair) => { + project = local.project_ids[pair[0]] + role = pair[1] + } + } + + project = each.value.project + role = each.value.role + member = "serviceAccount:${google_service_account.cloud_run_app_sa[split(",", each.key)[0]].email}" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} + +# Special assignment: Allow the CICD SA to create tokens +resource "google_service_account_iam_member" "cicd_run_invoker_token_creator" { + service_account_id = google_service_account.cicd_runner_sa.name + role = "roles/iam.serviceAccountTokenCreator" + member = "serviceAccount:${resource.google_service_account.cicd_runner_sa.email}" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} +# Special assignment: Allow the CICD SA to impersonate himself for trigger creation +resource "google_service_account_iam_member" "cicd_run_invoker_account_user" { + service_account_id = google_service_account.cicd_runner_sa.name + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:${resource.google_service_account.cicd_runner_sa.email}" + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf new file mode 100644 index 0000000000..676d68215c --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf @@ -0,0 +1,58 @@ +resource "google_project_iam_member" "bigquery_data_editor" { + for_each = local.project_ids + + project = each.value + role = 
"roles/bigquery.dataEditor" + member = module.log_export_to_bigquery[each.key].writer_identity +} + +module "log_export_to_bigquery" { + for_each = local.project_ids + + source = "terraform-google-modules/log-export/google" + version = "8.1.0" + + log_sink_name = var.telemetry_sink_name + parent_resource_type = "project" + parent_resource_id = each.value + destination_uri = "bigquery.googleapis.com/projects/${each.value}/datasets/${var.telemetry_bigquery_dataset_id}" + filter = var.telemetry_logs_filter + bigquery_options = { use_partitioned_tables = true } + unique_writer_identity = true + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] + +} + +resource "google_bigquery_dataset" "feedback_dataset" { + for_each = local.project_ids + project = each.value + dataset_id = var.feedback_bigquery_dataset_id + friendly_name = var.feedback_bigquery_dataset_id + location = var.region + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] + +} + +module "feedback_export_to_bigquery" { + for_each = local.project_ids + + source = "terraform-google-modules/log-export/google" + version = "8.1.0" + log_sink_name = var.feedback_sink_name + parent_resource_type = "project" + parent_resource_id = each.value + destination_uri = "bigquery.googleapis.com/projects/${each.value}/datasets/${var.feedback_bigquery_dataset_id}" + filter = var.feedback_logs_filter + bigquery_options = { use_partitioned_tables = true } + unique_writer_identity = true + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services, google_bigquery_dataset.feedback_dataset] +} + +resource "google_bigquery_dataset" "telemetry_logs_dataset" { + depends_on = [module.log_export_to_bigquery, module.feedback_export_to_bigquery, resource.google_project_service.shared_services] + for_each = local.project_ids + project = each.value + dataset_id = 
var.telemetry_bigquery_dataset_id + friendly_name = var.telemetry_bigquery_dataset_id + location = var.region +} \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/service_accounts.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/service_accounts.tf new file mode 100644 index 0000000000..d158071bab --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/service_accounts.tf @@ -0,0 +1,15 @@ +resource "google_service_account" "cicd_runner_sa" { + account_id = var.cicd_runner_sa_name + display_name = "CICD Runner SA" + project = var.cicd_runner_project_id + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} + +resource "google_service_account" "cloud_run_app_sa" { + for_each = local.project_ids + + account_id = var.cloud_run_app_sa_name + display_name = "Cloud Run Generative AI app SA" + project = each.value + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf new file mode 100644 index 0000000000..a6f994cb98 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf @@ -0,0 +1,37 @@ +terraform { + required_version = ">= 1.0.0" + required_providers { + google = { + source = "hashicorp/google" + version = ">= 3.53.0, < 6.0.0" + } + } +} + +locals { + all_projects = [ + var.cicd_runner_project_id, + var.prod_project_id, + var.staging_project_id + ] +} + +resource "google_storage_bucket" "bucket_load_test_results" { + name = "${var.cicd_runner_project_id}-${var.suffix_bucket_name_load_test_results}" + location = var.region + project = var.cicd_runner_project_id + uniform_bucket_level_access = true + force_destroy = true + 
depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} + +resource "google_storage_bucket" "logs_data_bucket" { + for_each = toset(local.all_projects) + name = "${each.value}-logs-data" + location = var.region + project = each.value + uniform_bucket_level_access = true + force_destroy = true + + depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services] +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/variables.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/variables.tf new file mode 100644 index 0000000000..57e7f05f63 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/variables.tf @@ -0,0 +1,125 @@ +variable "prod_project_id" { + type = string + description = "**Production** Google Cloud Project ID for resource deployment." +} + +variable "staging_project_id" { + type = string + description = "**Staging** Google Cloud Project ID for resource deployment." +} + +variable "cicd_runner_project_id" { + type = string + description = "Google Cloud Project ID where CI/CD pipelines will execute." +} + +variable "region" { + type = string + description = "Google Cloud region for resource deployment." + default = "us-central1" +} + +variable "host_connection_name" { + description = "Name of the host connection you created in Cloud Build" + type = string +} + +variable "repository_name" { + description = "Name of the repository you'd like to connect to Cloud Build" + type = string +} + +variable "telemetry_bigquery_dataset_id" { + type = string + description = "BigQuery dataset ID for telemetry data export." + default = "telemetry_genai_app_sample_sink" +} + +variable "feedback_bigquery_dataset_id" { + type = string + description = "BigQuery dataset ID for feedback data export." 
+ default = "feedback_genai_app_sample_sink" +} + +variable "telemetry_logs_filter" { + type = string + description = "Log Sink filter for capturing telemetry data. Captures logs with the `traceloop.association.properties.log_type` attribute set to `tracing`." + default = "jsonPayload.attributes.\"traceloop.association.properties.log_type\"=\"tracing\" jsonPayload.resource.attributes.\"service.name\"=\"Sample Chatbot Application\"" +} + +variable "feedback_logs_filter" { + type = string + description = "Log Sink filter for capturing feedback data. Captures logs where the `log_type` field is `feedback`." + default = "jsonPayload.log_type=\"feedback\"" +} + +variable "telemetry_sink_name" { + type = string + description = "Name of the telemetry data Log Sink." + default = "telemetry_logs_genai_app_sample" +} + +variable "feedback_sink_name" { + type = string + description = "Name of the feedback data Log Sink." + default = "feedback_logs_genai_app_sample" +} + +variable "cicd_runner_sa_name" { + description = "Service account name to be used for the CICD processes" + type = string + default = "cicd-runner" +} + +variable "cloud_run_app_sa_name" { + description = "Service account name to be used for the Cloud Run service" + type = string + default = "genai-app-sample-cr-sa" +} + +variable "suffix_bucket_name_load_test_results" { + description = "Suffix Name of the bucket that will be used to store the results of the load test. Prefix will be project id." 
+ type = string + default = "cicd-load-test-results" +} + + +variable "artifact_registry_repo_name" { + description = "Name of the Artifact registry repository to be used to push containers" + type = string + default = "genai-containers" +} + + + +variable "cloud_run_app_roles" { + description = "List of roles to assign to the Cloud Run app service account" + type = list(string) + default = [ + "roles/aiplatform.user", + "roles/discoveryengine.editor", + "roles/logging.logWriter", + "roles/cloudtrace.agent", + "roles/storage.admin" + ] +} + +variable "cicd_roles" { + description = "List of roles to assign to the CICD runner service account in the CICD project" + type = list(string) + default = [ + "roles/storage.admin", + "roles/run.invoker", + "roles/aiplatform.user", + "roles/discoveryengine.editor", + "roles/logging.logWriter", + "roles/artifactregistry.writer", + "roles/cloudbuild.builds.builder" + ] +} + +variable "cicd_sa_deployment_required_roles" { + description = "List of roles to assign to the CICD runner service account for the Staging and Prod projects." + type = list(string) + default = ["roles/run.developer", "roles/iam.serviceAccountUser"] +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/vars/env.tfvars b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/vars/env.tfvars new file mode 100644 index 0000000000..52679238c6 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/vars/env.tfvars @@ -0,0 +1,31 @@ +# Your Production Google Cloud project id +prod_project_id = "your-production-project-id" + +# Your Staging / Test Google Cloud project id +staging_project_id = "your-staging-project-id" + +# Your Google Cloud project ID that will be used to host the Cloud Build pipelines. 
+cicd_runner_project_id = "your-cicd-project-id" + +# Name of the host connection you created in Cloud Build +host_connection_name = "your-host-connection-name" + +# Name of the repository you added to Cloud Build +repository_name = "your-repository-name" + +# The Google Cloud region you will use to deploy the infrastructure +region = "us-central1" + +telemetry_bigquery_dataset_id = "telemetry_genai_app_sample_sink" +telemetry_sink_name = "telemetry_logs_genai_app_sample" +telemetry_logs_filter = "jsonPayload.attributes.\"traceloop.association.properties.log_type\"=\"tracing\" jsonPayload.resource.attributes.\"service.name\"=\"Sample Chatbot Application\"" + +feedback_bigquery_dataset_id = "feedback_genai_app_sample_sink" +feedback_sink_name = "feedback_logs_genai_app_sample" +feedback_logs_filter = "jsonPayload.log_type=\"feedback\"" + +cicd_runner_sa_name = "cicd-runner" +cloud_run_app_sa_name = "genai-app-sample-cr-sa" + +suffix_bucket_name_load_test_results = "cicd-load-test-results" +artifact_registry_repo_name = "genai-containers" \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/notebooks/getting_started.ipynb b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/notebooks/getting_started.ipynb new file mode 100644 index 0000000000..2efd479b1c --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/notebooks/getting_started.ipynb @@ -0,0 +1,1160 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OsXAs2gcIpbC" + }, + "outputs": [], + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7ZX50cNFOFBt" + }, + "source": [ + "# Getting Started - Template" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "84eed97da4c4" + }, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
    \n", + " \n", + " \"Google
    Open in Colab\n", + "
    \n", + "
    \n", + " \n", + " \"Google
    Open in Colab Enterprise\n", + "
    \n", + "
    \n", + " \n", + " \"Vertex
    Open in Vertex AI Workbench\n", + "
    \n", + "
    \n", + " \n", + " \"GitHub
    View on GitHub\n", + "
    \n", + "
    " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "usd0d_LiOFBt" + }, + "source": [ + "| | |\n", + "|-|-|\n", + "|Author(s) | [Elia Secchi](https://github.com/eliasecchig) |" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MjDmmmDaOFBt" + }, + "source": [ + "## Overview\n", + "\n", + "This tutorial walks you through the process of developing and assessing a chain - a sequence of steps that power an AI application. \n", + "These operations may include interactions with language models, utilization of tools, or data preprocessing steps, aiming to solve a given use case e.g a chatbot that provides grounded information.\n", + "\n", + "You'll learn how to:\n", + "\n", + "1. Build chains using three different approaches:\n", + " - [LangChain Expression Language (LCEL)](https://python.langchain.com/docs/expression_language/)\n", + " - [LangGraph](https://python.langchain.com/docs/langgraph/)\n", + " - A custom Python implementation. This is to enable implementation with other SDKs ( e.g [Vertex AI SDK](https://cloud.google.com/vertex-ai/docs/python-sdk/use-vertex-ai-python-sdk ), [LlamaIndex](https://www.llamaindex.ai/)) and to allow granular control on the sequence of steps in the chain\n", + " \n", + "2. Evaluate the performance of your chains using [Vertex AI Evaluation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview)\n", + "\n", + "Finally, the tutorial discusses next steps for deploying your chain in a production application\n", + "\n", + "By the end of this tutorial, you'll have a solid foundation for developing and refining your own Generative AI chains." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w-OcPSC8_FUX" + }, + "source": [ + "## Get Started" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c0a13ca7427f" + }, + "source": [ + "### Install required packages using Poetry (Recommended)\n", + "\n", + "This template uses [Poetry](https://python-poetry.org/) as tool to manage project dependencies. \n", + "Poetry makes it easy to install and keep track of the packages your project needs.\n", + "\n", + "To run this notebook with Poetry, follow these steps:\n", + "1. Make sure Poetry is installed. See the [relative guide for installation](https://python-poetry.org/docs/#installation).\n", + "\n", + "2. Make sure that dependencies are installed. From your command line:\n", + "\n", + " ```bash\n", + " poetry install --with streamlit,jupyter\n", + " ```\n", + "\n", + "3. Run Jupyter:\n", + "\n", + " ```bash\n", + " poetry run jupyter\n", + " ```\n", + " \n", + "4. Open this notebook in the Jupyter interface." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-7Jso8-FO4N8" + }, + "source": [ + "### (Alternative) Install Vertex AI SDK and other required packages " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tUat7NRq5JDC" + }, + "outputs": [], + "source": [ + "%pip install --quiet --upgrade nest_asyncio\n", + "%pip install --upgrade --user --quiet langchain-core langchain-google-vertexai langchain-google-community langchain langgraph\n", + "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[rapid_evaluation]\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "R5Xep4W9lq-Z" + }, + "source": [ + "### Restart runtime\n", + "\n", + "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n", + "\n", + "The restart might take a minute or longer. After it's restarted, continue to the next step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XRvKdaPDTznN" + }, + "outputs": [], + "source": [ + "import IPython\n", + "\n", + "app = IPython.Application.instance()\n", + "app.kernel.do_shutdown(True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SbmM4z7FOBpM" + }, + "source": [ + "
    \n", + "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n", + "
    " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dmWOrTJ3gx13" + }, + "source": [ + "### Authenticate your notebook environment (Colab only)\n", + "\n", + "If you're running this notebook on Google Colab, run the cell below to authenticate your environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NyKGtVQjgx13" + }, + "outputs": [], + "source": [ + "# import sys\n", + "\n", + "# if \"google.colab\" in sys.modules:\n", + "# from google.colab import auth\n", + "\n", + "# auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DF4l8DTdWgPY" + }, + "source": [ + "### Set Google Cloud project information and initialize Vertex AI SDK\n", + "\n", + "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n", + "\n", + "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Nqwi-5ufWp_B" + }, + "outputs": [], + "source": [ + "# Use the environment variable if the user doesn't provide Project ID.\n", + "import os\n", + "\n", + "import vertexai\n", + "\n", + "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n", + "if PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n", + "\n", + "vertexai.init(project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dvhI92xhQTzk" + }, + "source": [ + "### Import libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "869d543465ac" + }, + "outputs": [], + "source": [ + "# Add the parent directory to the Python path. This allows importing modules from the parent directory\n", + "import sys\n", + "\n", + "sys.path.append(\"../\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "146b41115577" + }, + "outputs": [], + "source": [ + "from collections.abc import Iterator\n", + "import json\n", + "from typing import Any, Literal\n", + "\n", + "from app.eval.utils import batch_generate_messages, generate_multiturn_history\n", + "from app.patterns.custom_rag_qa.templates import (\n", + " inspect_conversation_template,\n", + " rag_template,\n", + " template_docs,\n", + ")\n", + "from app.patterns.custom_rag_qa.vector_store import get_vector_store\n", + "from app.utils.output_types import OnChatModelStreamEvent, OnToolEndEvent, custom_chain\n", + "from google.cloud import aiplatform\n", + "from langchain.schema import Document\n", + "from langchain_core.messages import ToolMessage\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_core.runnables import RunnableConfig\n", + "from 
langchain_core.tools import tool\n", + "from langchain_google_community.vertex_rank import VertexAIRank\n", + "from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings\n", + "from langgraph.graph import END, MessagesState, StateGraph\n", + "from langgraph.prebuilt import ToolNode\n", + "import pandas as pd\n", + "from vertexai.evaluation import CustomMetric, EvalTask\n", + "import yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "675dff1826d4" + }, + "source": [ + "## Chain Interface\n", + "\n", + "This section outlines a possible interface for the chain, which, if implemented, ensures compatibility with the FastAPI server application included in the template. However, it's important to note that you have the flexibility to explore and implement alternative interfaces that suit your specific needs and requirements.\n", + "\n", + "\n", + "### Input Interface\n", + "\n", + "The chain must provide an `astream_events` method that accepts a dictionary with a \"messages\" key.\n", + "The \"messages\" value should be a list of LangChain [HumanMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.human.HumanMessage.html), [AIMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) objects and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html).\n", + "\n", + "For example a possible input might be:\n", + "\n", + "```py\n", + "{\n", + " \"messages\": [\n", + " HumanMessage(\"first\"),\n", + " AIMessage(\"a response\"),\n", + " HumanMessage(\"a follow up\"),\n", + " ]\n", + "}\n", + "```\n", + "\n", + "Alternatively you can use the shortened form:\n", + "\n", + "```py\n", + "{\"messages\": [(\"user\", \"first\"), (\"ai\", \"a response\"), (\"user\", \"a follow up\")]}\n", + "```\n", + "\n", + "### Output Interface\n", + "\n", + "All chains use the [LangChain Stream Events (v2) 
API](https://python.langchain.com/docs/how_to/streaming/#using-stream-events). This API supports various use cases (simple chains, RAG, Agents). This API emits asynchronous events that can be used to stream the chain's output.\n", + "\n", + "LangChain chains (LCEL, LangGraph) automatically implement the `astream_events` API. \n", + "\n", + "We provide examples of emitting `astream_events`-compatible events with custom Python code, allowing implementation with other SDKs (e.g., Vertex AI, LLamaIndex).\n", + "\n", + "### Customizing I/O Interfaces\n", + "\n", + "To modify the Input/Output interface, update `app/server.py` and related unit and integration tests." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "865ba0268d3b" + }, + "source": [ + "## Events supported\n", + "\n", + "The following list defines the events that are captured and supported by the Streamlit frontend." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4ece481555a2" + }, + "outputs": [], + "source": [ + "SUPPORTED_EVENTS = [\n", + " \"on_tool_start\",\n", + " \"on_tool_end\",\n", + " \"on_retriever_start\",\n", + " \"on_retriever_end\",\n", + " \"on_chat_model_stream\",\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "874120e6a4d2" + }, + "source": [ + "### Define the LLM\n", + "We set up the Large Language Model (LLM) for our conversational bot." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0d5c52272898" + }, + "outputs": [], + "source": [ + "llm = ChatVertexAI(model_name=\"gemini-1.5-flash-002\", temperature=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "abc296e9da88" + }, + "source": [ + "### Leverage LangChain LCEL\n", + "\n", + "LangChain Expression Language (LCEL) provides a declarative approach to composing chains seamlessly. Key benefits include:\n", + "\n", + "1. Rapid prototyping to production deployment without code changes\n", + "2. 
Scalability from simple \"prompt + LLM\" chains to complex, multi-step workflows\n", + "3. Enhanced readability and maintainability of chain logic\n", + "\n", + "For comprehensive guidance on LCEL implementation, refer to the [official documentation](https://python.langchain.com/docs/expression_language/get_started)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "25d185f29f42" + }, + "outputs": [], + "source": [ + "template = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\n", + " \"system\",\n", + " \"You are a knowledgeable culinary assistant specializing in providing\"\n", + " \"detailed cooking recipes. Your responses should be informative, engaging, \"\n", + " \"and tailored to the user's specific requests. Include ingredients, \"\n", + " \"step-by-step instructions, cooking times, and any helpful tips or \"\n", + " \"variations. If asked about dietary restrictions or substitutions, offer \"\n", + " \"appropriate alternatives.\",\n", + " ),\n", + " MessagesPlaceholder(variable_name=\"messages\"),\n", + " ]\n", + ")\n", + "\n", + "chain = template | llm" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "216f187e66d7" + }, + "source": [ + "Let's test the chain with a dummy question:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "267a75f53b45" + }, + "outputs": [], + "source": [ + "input_message = {\"messages\": [(\"human\", \"Can you provide me a Lasagne recipe?\")]}\n", + "\n", + "async for event in chain.astream_events(input=input_message, version=\"v2\"):\n", + " if event[\"event\"] in SUPPORTED_EVENTS:\n", + " print(event[\"data\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b262558c2375" + }, + "source": [ + "This methodology is used for the chain defined in the [`app/chain.py`](../app/chain.py) file." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1fa3c684b527" + }, + "source": [ + "We can also leverage the `invoke` method for synchronous invocation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5fc2bfec1d5b" + }, + "outputs": [], + "source": [ + "response = chain.invoke(input=input_message)\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "16a9fd5fdf2f" + }, + "source": [ + "### Use LangGraph\n", + "\n", + "LangGraph is a framework for building stateful, multi-actor applications with Large Language Models (LLMs). \n", + "It extends the LangChain library, allowing you to coordinate multiple chains (or actors) across multiple steps of computation in a cyclic manner." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "28b4ad81f8b7" + }, + "outputs": [], + "source": [ + "# 1. Define tools\n", + "\n", + "\n", + "@tool\n", + "def search(query: str):\n", + " \"\"\"Simulates a web search. Use it get information on weather. E.g what is the weather like in a region\"\"\"\n", + " if \"sf\" in query.lower() or \"san francisco\" in query.lower():\n", + " return \"It's 60 degrees and foggy.\"\n", + " return \"It's 90 degrees and sunny.\"\n", + "\n", + "\n", + "tools = [search]\n", + "\n", + "# 2. Set up the language model\n", + "llm = llm.bind_tools(tools)\n", + "\n", + "\n", + "# 3. 
Define workflow components\n", + "def should_continue(state: MessagesState) -> Literal[\"tools\", END]:\n", + " \"\"\"Determines whether to use tools or end the conversation.\"\"\"\n", + " last_message = state[\"messages\"][-1]\n", + " return \"tools\" if last_message.tool_calls else END\n", + "\n", + "\n", + "async def call_model(state: MessagesState, config: RunnableConfig):\n", + " \"\"\"Calls the language model and returns the response.\"\"\"\n", + " response = llm.invoke(state[\"messages\"], config)\n", + " return {\"messages\": response}\n", + "\n", + "\n", + "# 4. Create the workflow graph\n", + "workflow = StateGraph(MessagesState)\n", + "workflow.add_node(\"agent\", call_model)\n", + "workflow.add_node(\"tools\", ToolNode(tools))\n", + "workflow.set_entry_point(\"agent\")\n", + "\n", + "# 5. Define graph edges\n", + "workflow.add_conditional_edges(\"agent\", should_continue)\n", + "workflow.add_edge(\"tools\", \"agent\")\n", + "\n", + "# 6. Compile the workflow\n", + "chain = workflow.compile()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bd27c49717e8" + }, + "source": [ + "Let's test the new chain with a dummy question:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "a33402d68f7b" + }, + "outputs": [], + "source": [ + "input_message = {\"messages\": [(\"human\", \"What is the weather like in NY?\")]}\n", + "\n", + "async for event in chain.astream_events(input=input_message, version=\"v2\"):\n", + " if event[\"event\"] in SUPPORTED_EVENTS:\n", + " print(event[\"data\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2cf0ad148bf8" + }, + "source": [ + "This methodology is used for the chain defined in the [`app/patterns/langgraph_dummy_agent/chain.py`](../app/patterns/langgraph_dummy_agent/chain.py) file." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0ae7dad0966b" + }, + "source": [ + "### Use custom python code\n", + "\n", + "You can also use pure python code to orchestrate the different steps of your chain and emit `astream_events` [API compatible events](https://python.langchain.com/docs/how_to/streaming/#using-stream-events). \n", + "\n", + "This offers full flexibility in how the different steps of a chain are orchestrated and allows you to include other SDK frameworks such as [Vertex AI SDK](https://cloud.google.com/vertex-ai/docs/python-sdk/use-vertex-ai-python-sdk ), [LlamaIndex](https://www.llamaindex.ai/).\n", + "\n", + "We demonstrate this third methodology by implementing a RAG chain. The function `get_vector_store` provides a brute force Vector store (scikit-learn) initialized with data obtained from the [practitioners guide for MLOps](https://services.google.com/fh/files/misc/practitioners_guide_to_mlops_whitepaper.pdf)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6954a209c454" + }, + "outputs": [], + "source": [ + "llm = ChatVertexAI(model_name=\"gemini-1.5-flash-002\", temperature=0)\n", + "embedding = VertexAIEmbeddings(model_name=\"text-embedding-004\")\n", + "\n", + "\n", + "vector_store = get_vector_store(embedding=embedding)\n", + "retriever = vector_store.as_retriever(search_kwargs={\"k\": 20})\n", + "compressor = VertexAIRank(\n", + " project_id=PROJECT_ID,\n", + " location_id=\"global\",\n", + " ranking_config=\"default_ranking_config\",\n", + " title_field=\"id\",\n", + " top_n=5,\n", + ")\n", + "\n", + "\n", + "@tool\n", + "def retrieve_docs(query: str) -> list[Document]:\n", + " \"\"\"\n", + " Useful for retrieving relevant documents based on a query.\n", + " Use this when you need additional information to answer a question.\n", + "\n", + " Args:\n", + " query (str): The user's question or search query.\n", + "\n", + " Returns:\n", + " List[Document]: A list of the 
top-ranked Document objects, limited to TOP_K (5) results.\n", + " \"\"\"\n", + " retrieved_docs = retriever.invoke(query)\n", + " ranked_docs = compressor.compress_documents(documents=retrieved_docs, query=query)\n", + " return ranked_docs\n", + "\n", + "\n", + "@tool\n", + "def should_continue() -> None:\n", + " \"\"\"\n", + " Use this tool if you determine that you have enough context to respond to the questions of the user.\n", + " \"\"\"\n", + " return None\n", + "\n", + "\n", + "# Set up conversation inspector\n", + "inspect_conversation = inspect_conversation_template | llm.bind_tools(\n", + " [retrieve_docs, should_continue], tool_choice=\"any\"\n", + ")\n", + "\n", + "# Set up response chain\n", + "response_chain = rag_template | llm\n", + "\n", + "\n", + "@custom_chain\n", + "def chain(\n", + " input: dict[str, Any], **kwargs: Any\n", + ") -> Iterator[OnToolEndEvent | OnChatModelStreamEvent]:\n", + " \"\"\"\n", + " Implement a RAG QA chain with tool calls.\n", + "\n", + " This function is decorated with `custom_chain` to offer LangChain compatible\n", + " astream_events, support for synchronous invocation through the `invoke` method,\n", + " and OpenTelemetry tracing.\n", + " \"\"\"\n", + " # Inspect conversation and determine next action\n", + " inspection_result = inspect_conversation.invoke(input)\n", + " tool_call_result = inspection_result.tool_calls[0]\n", + "\n", + " # Execute the appropriate tool based on the inspection result\n", + " if tool_call_result[\"name\"] == \"retrieve_docs\":\n", + " # Retrieve relevant documents\n", + " docs = retrieve_docs.invoke(tool_call_result[\"args\"])\n", + " # Format the retrieved documents\n", + " formatted_docs = template_docs.format(docs=docs)\n", + " # Create a ToolMessage with the formatted documents\n", + " tool_message = ToolMessage(\n", + " tool_call_id=tool_call_result[\"name\"],\n", + " name=tool_call_result[\"name\"],\n", + " content=formatted_docs,\n", + " artifact=docs,\n", + " )\n", + " else:\n", + 
" # If no documents need to be retrieved, continue with the conversation\n", + " tool_message = should_continue.invoke(tool_call_result)\n", + "\n", + " # Update input messages with new information\n", + " input[\"messages\"] = input[\"messages\"] + [inspection_result, tool_message]\n", + "\n", + " # Yield tool results metadata\n", + " yield OnToolEndEvent(\n", + " data={\"input\": tool_call_result[\"args\"], \"output\": tool_message}\n", + " )\n", + "\n", + " # Stream LLM response\n", + " for chunk in response_chain.stream(input=input):\n", + " yield OnChatModelStreamEvent(data={\"chunk\": chunk})" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2d59f2641aaf" + }, + "source": [ + "The `@custom_chain` decorator defined in `app/utils/output_types.py`:\n", + "- Enables compatibility with the `astream_events` LangChain API interface by offering a `chain.astream_events` method.\n", + "- Provides an `invoke` method for synchronous invocation. This method can be utilized for evaluation purposes.\n", + "- Adds OpenTelemetry tracing functionality." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1b0d16359361" + }, + "source": [ + "This methodology is used for the chain defined in `app/patterns/custom_rag_qa/chain.py` file." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "82f22f13fde6" + }, + "source": [ + "Let's test the custom chain we just created. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7a1bcac73ff5" + }, + "outputs": [], + "source": [ + "input_message = {\"messages\": [(\"human\", \"What is MLOps?\")]}\n", + "\n", + "async for event in chain.astream_events(input=input_message, version=\"v2\"):\n", + " if event[\"event\"] in SUPPORTED_EVENTS:\n", + " print(event[\"data\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8730b4b6bbff" + }, + "source": [ + "## Evaluation\n", + "\n", + "Evaluation is the activity of assessing the quality of the model's outputs, to gauge its understanding and success in fulfilling the prompt's instructions.\n", + "\n", + "In the context of Generative AI, evaluation extends beyond the evaluation of the model's outputs to include the evaluation of the chain's outputs and in some cases the evaluation of the intermediate steps (for example, the evaluation of the retriever's outputs).\n", + "\n", + "### Vertex AI Evaluation\n", + "To evaluate the chain's outputs, we'll utilize [Vertex AI Evaluation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-overview) to assess our AI application's performance. \n", + "Vertex AI Evaluation streamlines the evaluation process for generative AI by offering three key features:\n", + "\n", + "- [Pre-built Metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval): It provides a library of ready-to-use metrics for common evaluation tasks, saving you time and effort in defining your own. These metrics cover a range of areas, simplifying the assessment of different aspects of your model's performance.\n", + " \n", + "- [Custom Metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval): Beyond pre-built options, Vertex AI Evaluation allows you to define and implement custom metrics tailored to your specific needs and application requirements. 
\n", + " \n", + "- Strong Integration with [Vertex AI Experiments](https://cloud.google.com/vertex-ai/docs/experiments/intro-vertex-ai-experiments): Vertex AI Evaluation seamlessly integrates with Vertex AI Experiments, creating a unified workflow for tracking experiments and managing evaluation results.\n", + "\n", + "For a comprehensive list of samples on Vertex AI Evaluation, visit the [official documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models/evaluation-examples)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e214b9a02547" + }, + "source": [ + "Let's start by defining again a simple chain:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "086298b785fb" + }, + "outputs": [], + "source": [ + "template = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\n", + " \"system\",\n", + " \"You are a knowledgeable culinary assistant specializing in providing\"\n", + " \"detailed cooking recipes. Your responses should be informative, engaging, \"\n", + " \"and tailored to the user's specific requests. Include ingredients, \"\n", + " \"step-by-step instructions, cooking times, and any helpful tips or \"\n", + " \"variations. If asked about dietary restrictions or substitutions, offer \"\n", + " \"appropriate alternatives.\",\n", + " ),\n", + " MessagesPlaceholder(variable_name=\"messages\"),\n", + " ]\n", + ")\n", + "\n", + "chain = template | llm" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "284ec00b23c7" + }, + "source": [ + "We then import the ground truth data we will use for evaluation. Data is stored in [`app/eval/data/chats.yaml`](../app/eval/data/chats.yaml)\n", + "Note: You might need to adjust the path depending on where your Jupyter kernel was initialized." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f7c05ab676d8" + }, + "outputs": [], + "source": [ + "y = yaml.safe_load(open(\"../app/eval/data/chats.yaml\"))\n", + "df = pd.DataFrame(y)\n", + "df" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "432ce6674c19" + }, + "source": [ + "We leverage the helper functions [`generate_multiturn_history`](../app/eval/utils.py) and [`batch_generate_messages`](../app/eval/utils.py) to prepare the data for evaluation and to generate the responses from the chain.\n", + "\n", + "You can see below the documentation for the two functions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3a6899faf8b3" + }, + "outputs": [], + "source": [ + "help(generate_multiturn_history)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6afe1b67475f" + }, + "outputs": [], + "source": [ + "help(batch_generate_messages)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "b5e1411cf0f4" + }, + "outputs": [], + "source": [ + "df = generate_multiturn_history(df)\n", + "df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9ff1aaed7eb0" + }, + "outputs": [], + "source": [ + "scored_data = batch_generate_messages(df, chain)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "933c797dc79e" + }, + "source": [ + "We extract the user message and the reference (ground truth) message from dataframe so that we can use them for evaluation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5e5ba1acd013" + }, + "outputs": [], + "source": [ + "scored_data[\"user\"] = scored_data[\"human_message\"].apply(lambda x: x[\"content\"])\n", + "scored_data[\"reference\"] = scored_data[\"ai_message\"].apply(lambda x: x[\"content\"])\n", + "scored_data" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dc41cc55703e" + }, + "source": [ + "#### Define a CustomMetric using Gemini model\n", + "\n", + "Define a customized Gemini model-based metric function, with explanations for the score. The registered custom metrics are computed on the client side, without using online evaluation service APIs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "51c0ffb825e1" + }, + "outputs": [], + "source": [ + "evaluator_llm = ChatVertexAI(\n", + " model_name=\"gemini-1.5-flash-001\",\n", + " temperature=0,\n", + " response_mime_type=\"application/json\",\n", + ")\n", + "\n", + "\n", + "def custom_faithfulness(instance):\n", + " prompt = f\"\"\"You are examining written text content. 
Here is the text:\n", + "************\n", + "Written content: {instance[\"response\"]}\n", + "************\n", + "Original source data: {instance[\"reference\"]}\n", + "\n", + "Examine the text and determine whether the text is faithful or not.\n", + "Faithfulness refers to how accurately a generated summary reflects the essential information and key concepts present in the original source document.\n", + "A faithful summary stays true to the facts and meaning of the source text, without introducing distortions, hallucinations, or information that wasn't originally there.\n", + "\n", + "Your response must be an explanation of your thinking along with single integer number on a scale of 0-5, 0\n", + "the least faithful and 5 being the most faithful.\n", + "\n", + "Produce results in JSON\n", + "\n", + "Expected format:\n", + "\n", + "```json\n", + "{{\n", + " \"explanation\": \"< your explanation>\",\n", + " \"custom_faithfulness\": \n", + "}}\n", + "```\n", + "\"\"\"\n", + "\n", + " result = evaluator_llm.invoke([(\"human\", prompt)])\n", + " result = json.loads(result.content)\n", + " return result\n", + "\n", + "\n", + "# Register Custom Metric\n", + "custom_faithfulness_metric = CustomMetric(\n", + " name=\"custom_faithfulness\",\n", + " metric_function=custom_faithfulness,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "234f1b868cb9" + }, + "outputs": [], + "source": [ + "experiment_name = \"template-langchain-eval\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f5b15279f9d2" + }, + "source": [ + "We are now ready to run the evaluation. We will use different metrics, combining the custom metric we defined above with some pre-built metrics.\n", + "\n", + "Results of the evaluation will be automatically tagged into the experiment_name we define.\n", + "\n", + "You can click `View Experiment`, to see the experiment in Google Cloud Console." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4d3faf6014f5" + }, + "outputs": [], + "source": [ + "metrics = [\"fluency\", \"safety\", custom_faithfulness_metric]\n", + "\n", + "eval_task = EvalTask(\n", + " dataset=scored_data,\n", + " metrics=metrics,\n", + " experiment=experiment_name,\n", + " metric_column_mapping={\"prompt\": \"user\"},\n", + ")\n", + "eval_result = eval_task.evaluate()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c51deffd590c" + }, + "source": [ + "Once an eval result is produced, we are able to display summary metrics:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "KheOvIvtiRlz" + }, + "outputs": [], + "source": [ + "eval_result.summary_metrics" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JcALGGlwu0p_" + }, + "source": [ + "We are also able to display a pandas dataframe containing a detailed summary of how our eval dataset performed and relative granular metrics." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9zJ686YYiWJC" + }, + "outputs": [], + "source": [ + "eval_result.metrics_table" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "378025ea68d0" + }, + "source": [ + "## Next Steps\n", + "\n", + "Congratulations on completing the getting started tutorial! You've learned different methodologies to build a chain and how to evaluate it. \n", + "Let's explore the next steps in your journey:\n", + "\n", + "### 1. Prepare for Production\n", + "\n", + "Once you're satisfied with your chain's evaluation results:\n", + "\n", + "1. Write your chain into the [`app/chain.py` file](../app/chain.py).\n", + "2. Remove the `patterns` folder and its associated tests (these are for demonstration only).\n", + "\n", + "### 2. 
Local Testing\n", + "\n", + "Test your chain using the playground:\n", + "\n", + "```bash\n", + "make playground\n", + "```\n", + "\n", + "This launches a feature-rich playground, including chat curation, user feedback collection, multimodal input, and more!\n", + "\n", + "\n", + "### 3. Production Deployment\n", + "\n", + "Once you are satisfied with the results, you can set up your CI/CD pipelines to deploy your chain to production.\n", + "\n", + "Please refer to the [deployment guide](../deployment/README.md) for more information on how to do that." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7bf6a5b0def0" + }, + "source": [ + "## Cleaning up\n", + "\n", + "To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\n", + "project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n", + "\n", + "Otherwise, you can delete the individual resources you created in this tutorial."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0236a4c1471d" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "# Delete Experiments\n", + "delete_experiments = True\n", + "if delete_experiments or os.getenv(\"IS_TESTING\"):\n", + " experiments_list = aiplatform.Experiment.list()\n", + " for experiment in experiments_list:\n", + " experiment.delete()" + ] + } + ], + "metadata": { + "colab": { + "name": "getting_started.ipynb", + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock new file mode 100644 index 0000000000..4e862fdc8b --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock @@ -0,0 +1,7864 @@ +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
+ +[[package]] +name = "aiohappyeyeballs" +version = "2.4.3" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"}, + {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"}, +] + +[[package]] +name = "aiohttp" +version = "3.10.10" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be7443669ae9c016b71f402e43208e13ddf00912f47f623ee5994e12fc7d4b3f"}, + {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b06b7843929e41a94ea09eb1ce3927865387e3e23ebe108e0d0d09b08d25be9"}, + {file = "aiohttp-3.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:333cf6cf8e65f6a1e06e9eb3e643a0c515bb850d470902274239fea02033e9a8"}, + {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:274cfa632350225ce3fdeb318c23b4a10ec25c0e2c880eff951a3842cf358ac1"}, + {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9e5e4a85bdb56d224f412d9c98ae4cbd032cc4f3161818f692cd81766eee65a"}, + {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b606353da03edcc71130b52388d25f9a30a126e04caef1fd637e31683033abd"}, + {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab5a5a0c7a7991d90446a198689c0535be89bbd6b410a1f9a66688f0880ec026"}, + {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578a4b875af3e0daaf1ac6fa983d93e0bbfec3ead753b6d6f33d467100cdc67b"}, + {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", 
hash = "sha256:8105fd8a890df77b76dd3054cddf01a879fc13e8af576805d667e0fa0224c35d"}, + {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3bcd391d083f636c06a68715e69467963d1f9600f85ef556ea82e9ef25f043f7"}, + {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fbc6264158392bad9df19537e872d476f7c57adf718944cc1e4495cbabf38e2a"}, + {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e48d5021a84d341bcaf95c8460b152cfbad770d28e5fe14a768988c461b821bc"}, + {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2609e9ab08474702cc67b7702dbb8a80e392c54613ebe80db7e8dbdb79837c68"}, + {file = "aiohttp-3.10.10-cp310-cp310-win32.whl", hash = "sha256:84afcdea18eda514c25bc68b9af2a2b1adea7c08899175a51fe7c4fb6d551257"}, + {file = "aiohttp-3.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:9c72109213eb9d3874f7ac8c0c5fa90e072d678e117d9061c06e30c85b4cf0e6"}, + {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c30a0eafc89d28e7f959281b58198a9fa5e99405f716c0289b7892ca345fe45f"}, + {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:258c5dd01afc10015866114e210fb7365f0d02d9d059c3c3415382ab633fcbcb"}, + {file = "aiohttp-3.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:15ecd889a709b0080f02721255b3f80bb261c2293d3c748151274dfea93ac871"}, + {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3935f82f6f4a3820270842e90456ebad3af15810cf65932bd24da4463bc0a4c"}, + {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:413251f6fcf552a33c981c4709a6bba37b12710982fec8e558ae944bfb2abd38"}, + {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1720b4f14c78a3089562b8875b53e36b51c97c51adc53325a69b79b4b48ebcb"}, + {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:679abe5d3858b33c2cf74faec299fda60ea9de62916e8b67e625d65bf069a3b7"}, + {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79019094f87c9fb44f8d769e41dbb664d6e8fcfd62f665ccce36762deaa0e911"}, + {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe2fb38c2ed905a2582948e2de560675e9dfbee94c6d5ccdb1301c6d0a5bf092"}, + {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a3f00003de6eba42d6e94fabb4125600d6e484846dbf90ea8e48a800430cc142"}, + {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1bbb122c557a16fafc10354b9d99ebf2f2808a660d78202f10ba9d50786384b9"}, + {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:30ca7c3b94708a9d7ae76ff281b2f47d8eaf2579cd05971b5dc681db8caac6e1"}, + {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:df9270660711670e68803107d55c2b5949c2e0f2e4896da176e1ecfc068b974a"}, + {file = "aiohttp-3.10.10-cp311-cp311-win32.whl", hash = "sha256:aafc8ee9b742ce75044ae9a4d3e60e3d918d15a4c2e08a6c3c3e38fa59b92d94"}, + {file = "aiohttp-3.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:362f641f9071e5f3ee6f8e7d37d5ed0d95aae656adf4ef578313ee585b585959"}, + {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9294bbb581f92770e6ed5c19559e1e99255e4ca604a22c5c6397b2f9dd3ee42c"}, + {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a8fa23fe62c436ccf23ff930149c047f060c7126eae3ccea005f0483f27b2e28"}, + {file = "aiohttp-3.10.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c6a5b8c7926ba5d8545c7dd22961a107526562da31a7a32fa2456baf040939f"}, + {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:007ec22fbc573e5eb2fb7dec4198ef8f6bf2fe4ce20020798b2eb5d0abda6138"}, + {file = 
"aiohttp-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9627cc1a10c8c409b5822a92d57a77f383b554463d1884008e051c32ab1b3742"}, + {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50edbcad60d8f0e3eccc68da67f37268b5144ecc34d59f27a02f9611c1d4eec7"}, + {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a45d85cf20b5e0d0aa5a8dca27cce8eddef3292bc29d72dcad1641f4ed50aa16"}, + {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b00807e2605f16e1e198f33a53ce3c4523114059b0c09c337209ae55e3823a8"}, + {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f2d4324a98062be0525d16f768a03e0bbb3b9fe301ceee99611dc9a7953124e6"}, + {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:438cd072f75bb6612f2aca29f8bd7cdf6e35e8f160bc312e49fbecab77c99e3a"}, + {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:baa42524a82f75303f714108fea528ccacf0386af429b69fff141ffef1c534f9"}, + {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a7d8d14fe962153fc681f6366bdec33d4356f98a3e3567782aac1b6e0e40109a"}, + {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1277cd707c465cd09572a774559a3cc7c7a28802eb3a2a9472588f062097205"}, + {file = "aiohttp-3.10.10-cp312-cp312-win32.whl", hash = "sha256:59bb3c54aa420521dc4ce3cc2c3fe2ad82adf7b09403fa1f48ae45c0cbde6628"}, + {file = "aiohttp-3.10.10-cp312-cp312-win_amd64.whl", hash = "sha256:0e1b370d8007c4ae31ee6db7f9a2fe801a42b146cec80a86766e7ad5c4a259cf"}, + {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ad7593bb24b2ab09e65e8a1d385606f0f47c65b5a2ae6c551db67d6653e78c28"}, + {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:1eb89d3d29adaf533588f209768a9c02e44e4baf832b08118749c5fad191781d"}, + {file = "aiohttp-3.10.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3fe407bf93533a6fa82dece0e74dbcaaf5d684e5a51862887f9eaebe6372cd79"}, + {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aed5155f819873d23520919e16703fc8925e509abbb1a1491b0087d1cd969e"}, + {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f05e9727ce409358baa615dbeb9b969db94324a79b5a5cea45d39bdb01d82e6"}, + {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dffb610a30d643983aeb185ce134f97f290f8935f0abccdd32c77bed9388b42"}, + {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6658732517ddabe22c9036479eabce6036655ba87a0224c612e1ae6af2087e"}, + {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:741a46d58677d8c733175d7e5aa618d277cd9d880301a380fd296975a9cdd7bc"}, + {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e00e3505cd80440f6c98c6d69269dcc2a119f86ad0a9fd70bccc59504bebd68a"}, + {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ffe595f10566f8276b76dc3a11ae4bb7eba1aac8ddd75811736a15b0d5311414"}, + {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdfcf6443637c148c4e1a20c48c566aa694fa5e288d34b20fcdc58507882fed3"}, + {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d183cf9c797a5291e8301790ed6d053480ed94070637bfaad914dd38b0981f67"}, + {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77abf6665ae54000b98b3c742bc6ea1d1fb31c394bcabf8b5d2c1ac3ebfe7f3b"}, + {file = "aiohttp-3.10.10-cp313-cp313-win32.whl", hash = "sha256:4470c73c12cd9109db8277287d11f9dd98f77fc54155fc71a7738a83ffcc8ea8"}, + {file = 
"aiohttp-3.10.10-cp313-cp313-win_amd64.whl", hash = "sha256:486f7aabfa292719a2753c016cc3a8f8172965cabb3ea2e7f7436c7f5a22a151"}, + {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1b66ccafef7336a1e1f0e389901f60c1d920102315a56df85e49552308fc0486"}, + {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:acd48d5b80ee80f9432a165c0ac8cbf9253eaddb6113269a5e18699b33958dbb"}, + {file = "aiohttp-3.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3455522392fb15ff549d92fbf4b73b559d5e43dc522588f7eb3e54c3f38beee7"}, + {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c3b868724137f713a38376fef8120c166d1eadd50da1855c112fe97954aed8"}, + {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:da1dee8948d2137bb51fbb8a53cce6b1bcc86003c6b42565f008438b806cccd8"}, + {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5ce2ce7c997e1971b7184ee37deb6ea9922ef5163c6ee5aa3c274b05f9e12fa"}, + {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28529e08fde6f12eba8677f5a8608500ed33c086f974de68cc65ab218713a59d"}, + {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7db54c7914cc99d901d93a34704833568d86c20925b2762f9fa779f9cd2e70f"}, + {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03a42ac7895406220124c88911ebee31ba8b2d24c98507f4a8bf826b2937c7f2"}, + {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7e338c0523d024fad378b376a79faff37fafb3c001872a618cde1d322400a572"}, + {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:038f514fe39e235e9fef6717fbf944057bfa24f9b3db9ee551a7ecf584b5b480"}, + {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_s390x.whl", hash = 
"sha256:64f6c17757251e2b8d885d728b6433d9d970573586a78b78ba8929b0f41d045a"}, + {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:93429602396f3383a797a2a70e5f1de5df8e35535d7806c9f91df06f297e109b"}, + {file = "aiohttp-3.10.10-cp38-cp38-win32.whl", hash = "sha256:c823bc3971c44ab93e611ab1a46b1eafeae474c0c844aff4b7474287b75fe49c"}, + {file = "aiohttp-3.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:54ca74df1be3c7ca1cf7f4c971c79c2daf48d9aa65dea1a662ae18926f5bc8ce"}, + {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01948b1d570f83ee7bbf5a60ea2375a89dfb09fd419170e7f5af029510033d24"}, + {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9fc1500fd2a952c5c8e3b29aaf7e3cc6e27e9cfc0a8819b3bce48cc1b849e4cc"}, + {file = "aiohttp-3.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f614ab0c76397661b90b6851a030004dac502e48260ea10f2441abd2207fbcc7"}, + {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00819de9e45d42584bed046314c40ea7e9aea95411b38971082cad449392b08c"}, + {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05646ebe6b94cc93407b3bf34b9eb26c20722384d068eb7339de802154d61bc5"}, + {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:998f3bd3cfc95e9424a6acd7840cbdd39e45bc09ef87533c006f94ac47296090"}, + {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9010c31cd6fa59438da4e58a7f19e4753f7f264300cd152e7f90d4602449762"}, + {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ea7ffc6d6d6f8a11e6f40091a1040995cdff02cfc9ba4c2f30a516cb2633554"}, + {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ef9c33cc5cbca35808f6c74be11eb7f5f6b14d2311be84a15b594bd3e58b5527"}, + {file = 
"aiohttp-3.10.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ce0cdc074d540265bfeb31336e678b4e37316849d13b308607efa527e981f5c2"}, + {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:597a079284b7ee65ee102bc3a6ea226a37d2b96d0418cc9047490f231dc09fe8"}, + {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7789050d9e5d0c309c706953e5e8876e38662d57d45f936902e176d19f1c58ab"}, + {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e7f8b04d83483577fd9200461b057c9f14ced334dcb053090cea1da9c8321a91"}, + {file = "aiohttp-3.10.10-cp39-cp39-win32.whl", hash = "sha256:c02a30b904282777d872266b87b20ed8cc0d1501855e27f831320f471d54d983"}, + {file = "aiohttp-3.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:edfe3341033a6b53a5c522c802deb2079eee5cbfbb0af032a55064bd65c73a23"}, + {file = "aiohttp-3.10.10.tar.gz", hash = "sha256:0631dd7c9f0822cc61c88586ca76d5b5ada26538097d0f1df510b082bad3411a"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.12.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "altair" +version = "5.4.1" +description = "Vega-Altair: A declarative statistical visualization library for Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "altair-5.4.1-py3-none-any.whl", hash = "sha256:0fb130b8297a569d08991fb6fe763582e7569f8a04643bbd9212436e3be04aef"}, + {file = "altair-5.4.1.tar.gz", hash = "sha256:0ce8c2e66546cb327e5f2d7572ec0e7c6feece816203215613962f0ec1d76a82"}, +] + +[package.dependencies] +jinja2 = "*" +jsonschema = ">=3.0" +narwhals = ">=1.5.2" +packaging = "*" +typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""} + +[package.extras] +all = ["altair-tiles (>=0.3.0)", "anywidget (>=0.9.0)", "numpy", "pandas (>=0.25.3)", "pyarrow (>=11)", "vega-datasets (>=0.9.0)", "vegafusion[embed] (>=1.6.6)", "vl-convert-python (>=1.6.0)"] +dev = ["geopandas", "hatch", "ibis-framework[polars]", "ipython[kernel]", "mistune", "mypy", "pandas (>=0.25.3)", "pandas-stubs", "polars (>=0.20.3)", "pytest", "pytest-cov", "pytest-xdist[psutil] (>=3.5,<4.0)", "ruff (>=0.6.0)", "types-jsonschema", "types-setuptools"] +doc = ["docutils", "jinja2", "myst-parser", "numpydoc", "pillow (>=9,<10)", "pydata-sphinx-theme (>=0.14.1)", "scipy", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinxext-altair"] + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anthropic" +version = "0.36.0" +description = "The official Python library for the anthropic API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anthropic-0.36.0-py3-none-any.whl", hash = "sha256:9183b9eaa0f409f2047244d7ef02c9c3eb916959c0b2960f7605dcb6cabbf548"}, + {file = "anthropic-0.36.0.tar.gz", hash = 
"sha256:7b0b1457096605572a29559d9a8ce224b9389d379b410e7d1bf5e0c1379f9ee2"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tokenizers = ">=0.13.0" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + +[[package]] +name = "anyio" +version = "4.6.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + +[[package]] +name = "argon2-cffi" +version = "23.1.0" +description = "Argon2 for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, + {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, +] + +[package.dependencies] +argon2-cffi-bindings = "*" + +[package.extras] +dev = ["argon2-cffi[tests,typing]", "tox (>4)"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] +tests = ["hypothesis", "pytest"] +typing = ["mypy"] + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +description = "Low-level CFFI bindings for Argon2" +optional = false +python-versions = ">=3.6" +files = [ + {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = 
"sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, + {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, + {file = 
"argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, +] + +[package.dependencies] +cffi = ">=1.0.1" + +[package.extras] +dev = ["cogapp", "pre-commit", "pytest", "wheel"] +tests = ["pytest"] + +[[package]] +name = "arrow" +version = "1.3.0" +description = "Better dates & times for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, + {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" +types-python-dateutil = ">=2.8.10" + +[package.extras] +doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] +test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] + +[[package]] +name = "asttokens" +version = "2.4.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = "*" +files = [ + {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, + {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, +] + +[package.dependencies] +six = ">=1.12.0" + +[package.extras] +astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] +test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] + +[[package]] +name = "async-lru" +version = "2.0.4" +description = "Simple LRU cache for asyncio" +optional = false +python-versions = ">=3.8" 
+files = [ + {file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"}, + {file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "attrs" +version = "24.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, +] + +[package.extras] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", 
"pytest-mypy-plugins"] + +[[package]] +name = "babel" +version = "2.16.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.8" +files = [ + {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, + {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, +] + +[package.extras] +dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "black" +version = "24.10.0" +description = "The uncompromising code formatter." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "bleach" +version = "6.1.0" +description = "An easy safelist-based HTML-sanitizing tool." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, + {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, +] + +[package.dependencies] +six = ">=1.9.0" +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.3)"] + +[[package]] +name = "blinker" +version = "1.8.2" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.8" +files = [ + {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"}, + {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, +] + +[[package]] +name = "cachetools" +version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file 
= "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = 
"charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = 
"charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = 
"charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + 
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + 
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = 
"charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "codespell" +version = "2.3.0" +description = "Codespell" +optional = false +python-versions = ">=3.8" +files = [ + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, +] + +[package.extras] +dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] +hard-encoding-detection = ["chardet"] +toml = ["tomli"] +types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "comm" +version = "0.2.2" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." +optional = false +python-versions = ">=3.8" +files = [ + {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, + {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, +] + +[package.dependencies] +traitlets = ">=4" + +[package.extras] +test = ["pytest"] + +[[package]] +name = "contourpy" +version = "1.3.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, + {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, + {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, + {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, + {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = 
"sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, + {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, + {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, + {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, + {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, + {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, + {file = 
"contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, + {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = 
"sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, + {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, + {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, +] + +[package.dependencies] +numpy = ">=1.23" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = 
"sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "dataclasses-json" +version = "0.6.7" +description = "Easily serialize dataclasses to and from JSON." +optional = false +python-versions = "<4.0,>=3.7" +files = [ + {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, + {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, +] + +[package.dependencies] +marshmallow = ">=3.18.0,<4.0.0" +typing-inspect = ">=0.4.0,<1" + +[[package]] +name = "debugpy" +version = "1.8.7" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "debugpy-1.8.7-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95fe04a573b8b22896c404365e03f4eda0ce0ba135b7667a1e57bd079793b96b"}, + {file = "debugpy-1.8.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:628a11f4b295ffb4141d8242a9bb52b77ad4a63a2ad19217a93be0f77f2c28c9"}, + {file = "debugpy-1.8.7-cp310-cp310-win32.whl", hash = "sha256:85ce9c1d0eebf622f86cc68618ad64bf66c4fc3197d88f74bb695a416837dd55"}, + {file = "debugpy-1.8.7-cp310-cp310-win_amd64.whl", hash = "sha256:29e1571c276d643757ea126d014abda081eb5ea4c851628b33de0c2b6245b037"}, + {file = "debugpy-1.8.7-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:caf528ff9e7308b74a1749c183d6808ffbedbb9fb6af78b033c28974d9b8831f"}, + {file = "debugpy-1.8.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba1d078cf2e1e0b8402e6bda528bf8fda7ccd158c3dba6c012b7897747c41a0"}, + {file = "debugpy-1.8.7-cp311-cp311-win32.whl", hash = 
"sha256:171899588bcd412151e593bd40d9907133a7622cd6ecdbdb75f89d1551df13c2"}, + {file = "debugpy-1.8.7-cp311-cp311-win_amd64.whl", hash = "sha256:6e1c4ffb0c79f66e89dfd97944f335880f0d50ad29525dc792785384923e2211"}, + {file = "debugpy-1.8.7-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:4d27d842311353ede0ad572600c62e4bcd74f458ee01ab0dd3a1a4457e7e3706"}, + {file = "debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2"}, + {file = "debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca"}, + {file = "debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39"}, + {file = "debugpy-1.8.7-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:d050a1ec7e925f514f0f6594a1e522580317da31fbda1af71d1530d6ea1f2b40"}, + {file = "debugpy-1.8.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f4349a28e3228a42958f8ddaa6333d6f8282d5edaea456070e48609c5983b7"}, + {file = "debugpy-1.8.7-cp313-cp313-win32.whl", hash = "sha256:11ad72eb9ddb436afb8337891a986302e14944f0f755fd94e90d0d71e9100bba"}, + {file = "debugpy-1.8.7-cp313-cp313-win_amd64.whl", hash = "sha256:2efb84d6789352d7950b03d7f866e6d180284bc02c7e12cb37b489b7083d81aa"}, + {file = "debugpy-1.8.7-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:4b908291a1d051ef3331484de8e959ef3e66f12b5e610c203b5b75d2725613a7"}, + {file = "debugpy-1.8.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da8df5b89a41f1fd31503b179d0a84a5fdb752dddd5b5388dbd1ae23cda31ce9"}, + {file = "debugpy-1.8.7-cp38-cp38-win32.whl", hash = "sha256:b12515e04720e9e5c2216cc7086d0edadf25d7ab7e3564ec8b4521cf111b4f8c"}, + {file = "debugpy-1.8.7-cp38-cp38-win_amd64.whl", hash = 
"sha256:93176e7672551cb5281577cdb62c63aadc87ec036f0c6a486f0ded337c504596"}, + {file = "debugpy-1.8.7-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:90d93e4f2db442f8222dec5ec55ccfc8005821028982f1968ebf551d32b28907"}, + {file = "debugpy-1.8.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6db2a370e2700557a976eaadb16243ec9c91bd46f1b3bb15376d7aaa7632c81"}, + {file = "debugpy-1.8.7-cp39-cp39-win32.whl", hash = "sha256:a6cf2510740e0c0b4a40330640e4b454f928c7b99b0c9dbf48b11efba08a8cda"}, + {file = "debugpy-1.8.7-cp39-cp39-win_amd64.whl", hash = "sha256:6a9d9d6d31846d8e34f52987ee0f1a904c7baa4912bf4843ab39dadf9b8f3e0d"}, + {file = "debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae"}, + {file = "debugpy-1.8.7.zip", hash = "sha256:18b8f731ed3e2e1df8e9cdaa23fb1fc9c24e570cd0081625308ec51c82efe42e"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "docstring-parser" +version = "0.16" +description = "Parse Python docstrings in reST, Google and Numpydoc format" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637"}, + {file = "docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e"}, +] + +[[package]] +name = "entrypoints" +version = "0.4" +description = "Discover and load entry points from installed packages." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, + {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "executing" +version = "2.1.0" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.8" +files = [ + {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, + {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] + +[[package]] +name = "extra-streamlit-components" +version = "0.1.71" +description = "An all-in-one place, to find complex or just natively unavailable components on streamlit." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "extra_streamlit_components-0.1.71-py3-none-any.whl", hash = "sha256:c8e6f98446adecd3002756362e50d0669693b7673afaa89cebfced6415cc6bd3"}, + {file = "extra_streamlit_components-0.1.71.tar.gz", hash = "sha256:d18314cf2ed009f95641882b50aa3bdb11b6a0eb6403fb43dbc8af1722419617"}, +] + +[package.dependencies] +streamlit = ">=1.18.0" + +[[package]] +name = "faker" +version = "30.3.0" +description = "Faker is a Python package that generates fake data for you." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Faker-30.3.0-py3-none-any.whl", hash = "sha256:e8a15fd1b0f72992b008f5ea94c70d3baa0cb51b0d5a0e899c17b1d1b23d2771"}, + {file = "faker-30.3.0.tar.gz", hash = "sha256:8760fbb34564fbb2f394345eef24aec5b8f6506b6cfcefe8195ed66dd1032bdb"}, +] + +[package.dependencies] +python-dateutil = ">=2.4" +typing-extensions = "*" + +[[package]] +name = "fastapi" +version = "0.110.3" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.110.3-py3-none-any.whl", hash = "sha256:fd7600612f755e4050beb74001310b5a7e1796d149c2ee363124abdfa0289d32"}, + {file = "fastapi-0.110.3.tar.gz", hash = "sha256:555700b0159379e94fdbfc6bb66a0f1c43f4cf7060f25239af3d84b63a656626"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.37.2,<0.38.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "fastjsonschema" +version = "2.20.0" +description = 
"Fastest Python implementation of JSON schema" +optional = false +python-versions = "*" +files = [ + {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"}, + {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "favicon" +version = "0.7.0" +description = "Get a website's favicon." +optional = false +python-versions = "*" +files = [ + {file = "favicon-0.7.0-py2.py3-none-any.whl", hash = "sha256:7fec0617c73dcb8521ea788e1d38cdc7226c7cb8e28c81e11625d85fa1534880"}, + {file = "favicon-0.7.0.tar.gz", hash = "sha256:6d6b5a78de2a0d0084589f687f384b2ecd6a6527093fec564403b1a30605d7a8"}, +] + +[package.dependencies] +beautifulsoup4 = ">=4.7.0" +requests = ">=2.21.0" + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] + +[[package]] +name = "flake8" +version = "7.1.1" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213"}, + {file = "flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.12.0,<2.13.0" +pyflakes = ">=3.2.0,<3.3.0" + +[[package]] +name = "flake8-pyproject" +version = "1.2.3" +description = "Flake8 plug-in loading the configuration from pyproject.toml" +optional = false +python-versions = ">= 3.6" +files = [ + {file = "flake8_pyproject-1.2.3-py3-none-any.whl", hash = "sha256:6249fe53545205af5e76837644dc80b4c10037e73a0e5db87ff562d75fb5bd4a"}, +] + +[package.dependencies] +Flake8 = ">=5" +TOMLi = {version = "*", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["pyTest", "pyTest-cov"] + +[[package]] +name = "fonttools" +version = "4.54.1" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"}, + {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"}, + {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"}, + {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"}, + {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"}, + {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"}, + {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"}, + {file = "fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"}, + {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"}, + {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"}, + {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"}, + {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"}, + {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"}, + {file = 
"fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"}, + {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"}, + {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"}, + {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"}, + {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"}, + {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"}, + {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"}, + {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"}, + {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"}, + {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"}, + {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"}, + {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"}, + {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"}, + {file = 
"fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"}, + {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"}, + {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"}, + {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"}, + {file = "fonttools-4.54.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ed2f80ca07025551636c555dec2b755dd005e2ea8fbeb99fc5cdff319b70b23b"}, + {file = "fonttools-4.54.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9dc080e5a1c3b2656caff2ac2633d009b3a9ff7b5e93d0452f40cd76d3da3b3c"}, + {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d152d1be65652fc65e695e5619e0aa0982295a95a9b29b52b85775243c06556"}, + {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8583e563df41fdecef31b793b4dd3af8a9caa03397be648945ad32717a92885b"}, + {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d1d353ef198c422515a3e974a1e8d5b304cd54a4c2eebcae708e37cd9eeffb1"}, + {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fda582236fee135d4daeca056c8c88ec5f6f6d88a004a79b84a02547c8f57386"}, + {file = "fonttools-4.54.1-cp38-cp38-win32.whl", hash = "sha256:e7d82b9e56716ed32574ee106cabca80992e6bbdcf25a88d97d21f73a0aae664"}, + {file = "fonttools-4.54.1-cp38-cp38-win_amd64.whl", hash = "sha256:ada215fd079e23e060157aab12eba0d66704316547f334eee9ff26f8c0d7b8ab"}, + {file = "fonttools-4.54.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5b8a096e649768c2f4233f947cf9737f8dbf8728b90e2771e2497c6e3d21d13"}, + {file = 
"fonttools-4.54.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e10d2e0a12e18f4e2dd031e1bf7c3d7017be5c8dbe524d07706179f355c5dac"}, + {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31c32d7d4b0958600eac75eaf524b7b7cb68d3a8c196635252b7a2c30d80e986"}, + {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c39287f5c8f4a0c5a55daf9eaf9ccd223ea59eed3f6d467133cc727d7b943a55"}, + {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a7a310c6e0471602fe3bf8efaf193d396ea561486aeaa7adc1f132e02d30c4b9"}, + {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d3b659d1029946f4ff9b6183984578041b520ce0f8fb7078bb37ec7445806b33"}, + {file = "fonttools-4.54.1-cp39-cp39-win32.whl", hash = "sha256:e96bc94c8cda58f577277d4a71f51c8e2129b8b36fd05adece6320dd3d57de8a"}, + {file = "fonttools-4.54.1-cp39-cp39-win_amd64.whl", hash = "sha256:e8a4b261c1ef91e7188a30571be6ad98d1c6d9fa2427244c545e2fa0a2494dd7"}, + {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"}, + {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "fqdn" 
+version = "1.5.1" +description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" +optional = false +python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" +files = [ + {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, + {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, +] + +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = 
"frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = 
"frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "fsspec" +version = "2024.9.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, + {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = 
["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.43" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, + {file = "GitPython-3.1.43.tar.gz", hash = 
"sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] + +[[package]] +name = "google-api-core" +version = "2.21.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_core-2.21.0-py3-none-any.whl", hash = "sha256:6869eacb2a37720380ba5898312af79a4d30b8bca1548fb4093e0697dc4bdf5d"}, + {file = "google_api_core-2.21.0.tar.gz", hash = "sha256:4a152fd11a9f774ea606388d423b68aa7e6d6a0ffe4c8266f74979613ec09f81"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +grpcio = [ + {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +grpcio-status = [ + {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"] +grpc = ["grpcio 
(>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-api-python-client" +version = "2.149.0" +description = "Google API Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_python_client-2.149.0-py2.py3-none-any.whl", hash = "sha256:1a5232e9cfed8c201799d9327e4d44dc7ea7daa3c6e1627fca41aa201539c0da"}, + {file = "google_api_python_client-2.149.0.tar.gz", hash = "sha256:b9d68c6b14ec72580d66001bd33c5816b78e2134b93ccc5cf8f624516b561750"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0" +google-auth = ">=1.32.0,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0.dev0" +google-auth-httplib2 = ">=0.2.0,<1.0.0" +httplib2 = ">=0.19.0,<1.dev0" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "2.35.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"}, + {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +description = "Google Authentication Library: httplib2 transport" +optional = false +python-versions = "*" +files = [ + {file = 
"google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, + {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, +] + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.19.0" + +[[package]] +name = "google-cloud-aiplatform" +version = "1.70.0" +description = "Vertex AI API client library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "google-cloud-aiplatform-1.70.0.tar.gz", hash = "sha256:e8edef6dbc7911380d0ea55c47544e799f62b891cb1a83b504ca1c09fff9884b"}, + {file = "google_cloud_aiplatform-1.70.0-py2.py3-none-any.whl", hash = "sha256:690e6041f03d3aa85102ac3f316c958d6f43a99aefb7fb3f8938dee56d08abd9"}, +] + +[package.dependencies] +docstring-parser = "<1" +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.8.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" +google-cloud-bigquery = ">=1.15.0,<3.20.0 || >3.20.0,<4.0.0dev" +google-cloud-resource-manager = ">=1.3.3,<3.0.0dev" +google-cloud-storage = ">=1.32.0,<3.0.0dev" +packaging = ">=14.3" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" +pydantic = "<3" +shapely = "<3.0.0dev" + +[package.extras] +autologging = ["mlflow (>=1.27.0,<=2.16.0)"] +cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] +datasets = ["pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)"] +endpoint = ["requests (>=2.28.1)"] +evaluation = ["pandas (>=1.0.0,<2.2.0)", "tqdm (>=4.23.0)"] +full = ["docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.114.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp 
(==0.4.0)", "mlflow (>=1.27.0,<=2.16.0)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<2.10.dev0 || >=2.33.dev0,<=2.33.0)", "ray[default] (>=2.5,<=2.33.0)", "requests (>=2.28.1)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "tqdm (>=4.23.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)"] +langchain = ["langchain (>=0.1.16,<0.3)", "langchain-core (<0.3)", "langchain-google-vertexai (<2)", "openinference-instrumentation-langchain (>=0.1.19,<0.2)", "orjson (<=3.10.6)", "tenacity (<=8.3)"] +langchain-testing = ["absl-py", "cloudpickle (>=3.0,<4.0)", "google-cloud-trace (<2)", "langchain (>=0.1.16,<0.3)", "langchain-core (<0.3)", "langchain-google-vertexai (<2)", "openinference-instrumentation-langchain (>=0.1.19,<0.2)", "opentelemetry-exporter-gcp-trace (<2)", "opentelemetry-sdk (<2)", "orjson (<=3.10.6)", "pydantic (>=2.6.3,<3)", "pytest-xdist", "tenacity (<=8.3)"] +lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"] +metadata = ["numpy (>=1.15.0)", "pandas (>=1.0.0)"] +pipelines = ["pyyaml (>=5.3.1,<7)"] +prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<=0.114.0)", "httpx (>=0.23.0,<0.25.0)", "starlette (>=0.17.1)", "uvicorn[standard] (>=0.16.0)"] +private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"] +ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0)", "pyarrow (>=6.0.1)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<2.10.dev0 || >=2.33.dev0,<=2.33.0)", "ray[default] (>=2.5,<=2.33.0)", "setuptools (<70.0.0)"] +ray-testing = 
["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0)", "pyarrow (>=6.0.1)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<2.10.dev0 || >=2.33.dev0,<=2.33.0)", "ray[default] (>=2.5,<=2.33.0)", "ray[train]", "scikit-learn", "setuptools (<70.0.0)", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"] +reasoningengine = ["cloudpickle (>=3.0,<4.0)", "google-cloud-trace (<2)", "opentelemetry-exporter-gcp-trace (<2)", "opentelemetry-sdk (<2)", "pydantic (>=2.6.3,<3)"] +tensorboard = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] +testing = ["bigframes", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.114.0)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.16.0)", "nltk", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<2.10.dev0 || >=2.33.dev0,<=2.33.0)", "ray[default] (>=2.5,<=2.33.0)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "sentencepiece (>=0.2.0)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "tqdm (>=4.23.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"] +tokenization = ["sentencepiece 
(>=0.2.0)"] +vizier = ["google-vizier (>=0.1.6)"] +xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] + +[[package]] +name = "google-cloud-appengine-logging" +version = "1.4.5" +description = "Google Cloud Appengine Logging API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_appengine_logging-1.4.5-py2.py3-none-any.whl", hash = "sha256:344e0244404049b42164e4d6dc718ca2c81b393d066956e7cb85fd9407ed9c48"}, + {file = "google_cloud_appengine_logging-1.4.5.tar.gz", hash = "sha256:de7d766e5d67b19fc5833974b505b32d2a5bbdfb283fd941e320e7cfdae4cb83"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-audit-log" +version = "0.3.0" +description = "Google Cloud Audit Protos" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_audit_log-0.3.0-py2.py3-none-any.whl", hash = "sha256:8340793120a1d5aa143605def8704ecdcead15106f754ef1381ae3bab533722f"}, + {file = "google_cloud_audit_log-0.3.0.tar.gz", hash = "sha256:901428b257020d8c1d1133e0fa004164a555e5a395c7ca3cdbb8486513df3a65"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.56.2,<2.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-bigquery" +version = "3.26.0" +description = "Google BigQuery API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_bigquery-3.26.0-py2.py3-none-any.whl", hash = "sha256:e0e9ad28afa67a18696e624cbccab284bf2c0a3f6eeb9eeb0426c69b943793a8"}, + {file = "google_cloud_bigquery-3.26.0.tar.gz", hash = 
"sha256:edbdc788beea659e04c0af7fe4dcd6d9155344b98951a0d5055bd2f15da4ba23"}, +] + +[package.dependencies] +google-api-core = {version = ">=2.11.1,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" +google-cloud-core = ">=2.4.1,<3.0.0dev" +google-resumable-media = ">=2.0.0,<3.0dev" +packaging = ">=20.0.0" +python-dateutil = ">=2.7.3,<3.0dev" +requests = ">=2.21.0,<3.0.0dev" + +[package.extras] +all = ["Shapely (>=1.8.4,<3.0.0dev)", "bigquery-magics (>=0.1.0)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.22.3,<2.0.0dev)", "protobuf (>=3.20.2,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] +bigquery-v2 = ["proto-plus (>=1.22.3,<2.0.0dev)", "protobuf (>=3.20.2,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev)"] +bqstorage = ["google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"] +geopandas = ["Shapely (>=1.8.4,<3.0.0dev)", "geopandas (>=0.9.0,<1.0dev)"] +ipython = ["bigquery-magics (>=0.1.0)"] +ipywidgets = ["ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)"] +opentelemetry = ["opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)"] +pandas = ["db-dtypes (>=0.3.0,<2.0.0dev)", "importlib-metadata (>=1.0.0)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)"] +tqdm = ["tqdm (>=4.7.4,<5.0.0dev)"] + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = 
"sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-discoveryengine" +version = "0.11.14" +description = "Google Cloud Discoveryengine API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-discoveryengine-0.11.14.tar.gz", hash = "sha256:d9c59092937728f81817ba8ff17e45f6ef88ec257b42f927e02db948e295993a"}, + {file = "google_cloud_discoveryengine-0.11.14-py3-none-any.whl", hash = "sha256:e3e56d78812298355f5c10ccc03b7e52603591c7699d9b3556416efd80911720"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-logging" +version = "3.11.2" +description = "Stackdriver Logging API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_logging-3.11.2-py2.py3-none-any.whl", hash = "sha256:0a755f04f184fbe77ad608258dc283a032485ebb4d0e2b2501964059ee9c898f"}, + {file = "google_cloud_logging-3.11.2.tar.gz", hash = "sha256:4897441c2b74f6eda9181c23a8817223b6145943314a821d64b729d30766cb2b"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +google-cloud-appengine-logging = 
">=0.1.3,<2.0.0dev" +google-cloud-audit-log = ">=0.2.4,<1.0.0dev" +google-cloud-core = ">=2.0.0,<3.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +opentelemetry-api = ">=1.9.0" +proto-plus = [ + {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, +] +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-resource-manager" +version = "1.12.5" +description = "Google Cloud Resource Manager API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_resource_manager-1.12.5-py2.py3-none-any.whl", hash = "sha256:2708a718b45c79464b7b21559c701b5c92e6b0b1ab2146d0a256277a623dc175"}, + {file = "google_cloud_resource_manager-1.12.5.tar.gz", hash = "sha256:b7af4254401ed4efa3aba3a929cb3ddb803fa6baf91a78485e45583597de5891"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-storage" +version = "2.18.2" +description = "Google Cloud Storage API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_storage-2.18.2-py2.py3-none-any.whl", hash = "sha256:97a4d45c368b7d401ed48c4fdfe86e1e1cb96401c9e199e419d289e2c0370166"}, + {file = "google_cloud_storage-2.18.2.tar.gz", hash = "sha256:aaf7acd70cdad9f274d29332673fcab98708d0e1f4dceb5a5356aaef06af4d99"}, +] + +[package.dependencies] +google-api-core = ">=2.15.0,<3.0.0dev" +google-auth = ">=2.26.1,<3.0dev" +google-cloud-core = 
">=2.3.0,<3.0dev" +google-crc32c = ">=1.0,<2.0dev" +google-resumable-media = ">=2.7.2" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf (<6.0.0dev)"] +tracing = ["opentelemetry-api (>=1.1.0)"] + +[[package]] +name = "google-cloud-trace" +version = "1.13.5" +description = "Google Cloud Trace API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_trace-1.13.5-py2.py3-none-any.whl", hash = "sha256:9e22f2eb8a07b7614f889ef59c041458d3241d3fbb76455acd2b4c9264429755"}, + {file = "google_cloud_trace-1.13.5.tar.gz", hash = "sha256:68954c1c4ae2536f5cc374736d065ce963708bd35c9d95ef36e8b3d4bf07c6a2"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-crc32c" +version = "1.6.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = false +python-versions = ">=3.9" +files = [ + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa"}, + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc"}, + {file = "google_crc32c-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f"}, + {file = "google_crc32c-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205"}, + {file = 
"google_crc32c-1.6.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57"}, + {file = "google_crc32c-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d"}, + {file = "google_crc32c-1.6.0.tar.gz", hash = "sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc"}, +] + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "google-resumable-media" +version = "2.7.2" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = 
"sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa"}, + {file = "google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +name = "googleapis-common-protos" +version = "1.65.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, + {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, +] + +[package.dependencies] +grpcio = {version = ">=1.44.0,<2.0.0.dev0", optional = true, markers = "extra == \"grpc\""} +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "greenlet" +version = "3.1.1" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = 
"greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = 
"greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = 
"sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = 
"greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpc-google-iam-v1" +version = "0.13.1" +description = "IAM API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpc-google-iam-v1-0.13.1.tar.gz", hash = "sha256:3ff4b2fd9d990965e410965253c0da6f66205d5a8291c4c31c6ebecca18a9001"}, + {file = "grpc_google_iam_v1-0.13.1-py2.py3-none-any.whl", hash = "sha256:c3e86151a981811f30d5e7330f271cee53e73bb87755e88cc3b6f0c7b5fe374e"}, +] + +[package.dependencies] +googleapis-common-protos = {version = ">=1.56.0,<2.0.0dev", extras = ["grpc"]} +grpcio = ">=1.44.0,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "grpcio" +version = 
"1.64.1" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +files = [ + {file = "grpcio-1.64.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:55697ecec192bc3f2f3cc13a295ab670f51de29884ca9ae6cd6247df55df2502"}, + {file = "grpcio-1.64.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3b64ae304c175671efdaa7ec9ae2cc36996b681eb63ca39c464958396697daff"}, + {file = "grpcio-1.64.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:bac71b4b28bc9af61efcdc7630b166440bbfbaa80940c9a697271b5e1dabbc61"}, + {file = "grpcio-1.64.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c024ffc22d6dc59000faf8ad781696d81e8e38f4078cb0f2630b4a3cf231a90"}, + {file = "grpcio-1.64.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7cd5c1325f6808b8ae31657d281aadb2a51ac11ab081ae335f4f7fc44c1721d"}, + {file = "grpcio-1.64.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0a2813093ddb27418a4c99f9b1c223fab0b053157176a64cc9db0f4557b69bd9"}, + {file = "grpcio-1.64.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2981c7365a9353f9b5c864595c510c983251b1ab403e05b1ccc70a3d9541a73b"}, + {file = "grpcio-1.64.1-cp310-cp310-win32.whl", hash = "sha256:1262402af5a511c245c3ae918167eca57342c72320dffae5d9b51840c4b2f86d"}, + {file = "grpcio-1.64.1-cp310-cp310-win_amd64.whl", hash = "sha256:19264fc964576ddb065368cae953f8d0514ecc6cb3da8903766d9fb9d4554c33"}, + {file = "grpcio-1.64.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:58b1041e7c870bb30ee41d3090cbd6f0851f30ae4eb68228955d973d3efa2e61"}, + {file = "grpcio-1.64.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bbc5b1d78a7822b0a84c6f8917faa986c1a744e65d762ef6d8be9d75677af2ca"}, + {file = "grpcio-1.64.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5841dd1f284bd1b3d8a6eca3a7f062b06f1eec09b184397e1d1d43447e89a7ae"}, + {file = "grpcio-1.64.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:8caee47e970b92b3dd948371230fcceb80d3f2277b3bf7fbd7c0564e7d39068e"}, + {file = "grpcio-1.64.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73819689c169417a4f978e562d24f2def2be75739c4bed1992435d007819da1b"}, + {file = "grpcio-1.64.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6503b64c8b2dfad299749cad1b595c650c91e5b2c8a1b775380fcf8d2cbba1e9"}, + {file = "grpcio-1.64.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1de403fc1305fd96cfa75e83be3dee8538f2413a6b1685b8452301c7ba33c294"}, + {file = "grpcio-1.64.1-cp311-cp311-win32.whl", hash = "sha256:d4d29cc612e1332237877dfa7fe687157973aab1d63bd0f84cf06692f04c0367"}, + {file = "grpcio-1.64.1-cp311-cp311-win_amd64.whl", hash = "sha256:5e56462b05a6f860b72f0fa50dca06d5b26543a4e88d0396259a07dc30f4e5aa"}, + {file = "grpcio-1.64.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:4657d24c8063e6095f850b68f2d1ba3b39f2b287a38242dcabc166453e950c59"}, + {file = "grpcio-1.64.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:62b4e6eb7bf901719fce0ca83e3ed474ae5022bb3827b0a501e056458c51c0a1"}, + {file = "grpcio-1.64.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:ee73a2f5ca4ba44fa33b4d7d2c71e2c8a9e9f78d53f6507ad68e7d2ad5f64a22"}, + {file = "grpcio-1.64.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:198908f9b22e2672a998870355e226a725aeab327ac4e6ff3a1399792ece4762"}, + {file = "grpcio-1.64.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b9d0acaa8d835a6566c640f48b50054f422d03e77e49716d4c4e8e279665a1"}, + {file = "grpcio-1.64.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5e42634a989c3aa6049f132266faf6b949ec2a6f7d302dbb5c15395b77d757eb"}, + {file = "grpcio-1.64.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1a82e0b9b3022799c336e1fc0f6210adc019ae84efb7321d668129d28ee1efb"}, + {file = "grpcio-1.64.1-cp312-cp312-win32.whl", hash = "sha256:55260032b95c49bee69a423c2f5365baa9369d2f7d233e933564d8a47b893027"}, 
+ {file = "grpcio-1.64.1-cp312-cp312-win_amd64.whl", hash = "sha256:c1a786ac592b47573a5bb7e35665c08064a5d77ab88a076eec11f8ae86b3e3f6"}, + {file = "grpcio-1.64.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:a011ac6c03cfe162ff2b727bcb530567826cec85eb8d4ad2bfb4bd023287a52d"}, + {file = "grpcio-1.64.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4d6dab6124225496010bd22690f2d9bd35c7cbb267b3f14e7a3eb05c911325d4"}, + {file = "grpcio-1.64.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:a5e771d0252e871ce194d0fdcafd13971f1aae0ddacc5f25615030d5df55c3a2"}, + {file = "grpcio-1.64.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c3c1b90ab93fed424e454e93c0ed0b9d552bdf1b0929712b094f5ecfe7a23ad"}, + {file = "grpcio-1.64.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20405cb8b13fd779135df23fabadc53b86522d0f1cba8cca0e87968587f50650"}, + {file = "grpcio-1.64.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0cc79c982ccb2feec8aad0e8fb0d168bcbca85bc77b080d0d3c5f2f15c24ea8f"}, + {file = "grpcio-1.64.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a3a035c37ce7565b8f4f35ff683a4db34d24e53dc487e47438e434eb3f701b2a"}, + {file = "grpcio-1.64.1-cp38-cp38-win32.whl", hash = "sha256:1257b76748612aca0f89beec7fa0615727fd6f2a1ad580a9638816a4b2eb18fd"}, + {file = "grpcio-1.64.1-cp38-cp38-win_amd64.whl", hash = "sha256:0a12ddb1678ebc6a84ec6b0487feac020ee2b1659cbe69b80f06dbffdb249122"}, + {file = "grpcio-1.64.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:75dbbf415026d2862192fe1b28d71f209e2fd87079d98470db90bebe57b33179"}, + {file = "grpcio-1.64.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e3d9f8d1221baa0ced7ec7322a981e28deb23749c76eeeb3d33e18b72935ab62"}, + {file = "grpcio-1.64.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5f8b75f64d5d324c565b263c67dbe4f0af595635bbdd93bb1a88189fc62ed2e5"}, + {file = "grpcio-1.64.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c84ad903d0d94311a2b7eea608da163dace97c5fe9412ea311e72c3684925602"}, + {file = "grpcio-1.64.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:940e3ec884520155f68a3b712d045e077d61c520a195d1a5932c531f11883489"}, + {file = "grpcio-1.64.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f10193c69fc9d3d726e83bbf0f3d316f1847c3071c8c93d8090cf5f326b14309"}, + {file = "grpcio-1.64.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac15b6c2c80a4d1338b04d42a02d376a53395ddf0ec9ab157cbaf44191f3ffdd"}, + {file = "grpcio-1.64.1-cp39-cp39-win32.whl", hash = "sha256:03b43d0ccf99c557ec671c7dede64f023c7da9bb632ac65dbc57f166e4970040"}, + {file = "grpcio-1.64.1-cp39-cp39-win_amd64.whl", hash = "sha256:ed6091fa0adcc7e4ff944090cf203a52da35c37a130efa564ded02b7aff63bcd"}, + {file = "grpcio-1.64.1.tar.gz", hash = "sha256:8d51dd1c59d5fa0f34266b80a3805ec29a1f26425c2a54736133f6d87fc4968a"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.64.1)"] + +[[package]] +name = "grpcio-status" +version = "1.62.3" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.6" +files = [ + {file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"}, + {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.62.3" +protobuf = ">=4.21.6" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "htbuilder" +version = "0.6.2" +description = 
"A purely-functional HTML builder for Python. Think JSX rather than templates." +optional = false +python-versions = ">=3.5" +files = [ + {file = "htbuilder-0.6.2-py3-none-any.whl", hash = "sha256:5bb707221a0e2162e406c9ecf7bcc9efa9ad590c9f2180149440415f43a10bb5"}, + {file = "htbuilder-0.6.2.tar.gz", hash = "sha256:9979a4fb6e50ce732bf6f6bc0441039dcaa3a3fc70689d8f38f601ed8a1aeec0"}, +] + +[package.dependencies] +more-itertools = "*" + +[[package]] +name = "httpcore" +version = "1.0.6" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httplib2" +version = "0.22.0" +description = "A comprehensive HTTP client library." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, + {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, +] + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + +[[package]] +name = "httptools" +version = "0.6.1" +description = "A collection of framework independent HTTP protocol utils." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, + {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, + {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, + {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"}, + {file = 
"httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"}, + {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"}, + {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"}, + {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +] + +[package.extras] +test = ["Cython (>=0.29.24,<0.30.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next 
generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "httpx-sse" +version = "0.4.0" +description = "Consume Server-Sent Event (SSE) messages with HTTPX." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, + {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, +] + +[[package]] +name = "huggingface-hub" +version = "0.25.2" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"}, + {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", 
"pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp", "minijinja (>=1.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest 
(>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "immutabledict" +version = "4.2.0" +description = "Immutable wrapper around dictionaries (a fork of frozendict)" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"}, + {file = "immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"}, +] + +[[package]] +name = "importlib-metadata" +version = "8.4.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "inflection" +version = "0.5.1" +description = "A port of Ruby on Rails inflector to Python" +optional = false +python-versions = ">=3.5" +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "ipykernel" +version = "6.29.5" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, + {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=24" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "ipython" +version = "8.28.0" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.10" +files = [ + {file = "ipython-8.28.0-py3-none-any.whl", hash = "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35"}, + {file = "ipython-8.28.0.tar.gz", hash = "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +exceptiongroup = {version = "*", markers = 
"python_version < \"3.11\""} +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt-toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5.13.0" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} + +[package.extras] +all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"] +kernel = ["ipykernel"] +matplotlib = ["matplotlib"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["packaging", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] + +[[package]] +name = "ipywidgets" +version = "8.1.5" +description = "Jupyter interactive widgets" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ipywidgets-8.1.5-py3-none-any.whl", hash = "sha256:3290f526f87ae6e77655555baba4f36681c555b8bdbbff430b70e52c34c86245"}, + {file = "ipywidgets-8.1.5.tar.gz", hash = "sha256:870e43b1a35656a80c18c9503bbf2d16802db1cb487eec6fab27d683381dde17"}, +] + +[package.dependencies] +comm = ">=0.1.3" +ipython = ">=6.1.0" +jupyterlab-widgets = ">=3.0.12,<3.1.0" +traitlets = ">=4.3.1" +widgetsnbextension = ">=4.0.12,<4.1.0" + +[package.extras] +test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] + +[[package]] +name = "isoduration" +version = "20.11.0" +description = "Operations with ISO 8601 durations" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, + {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, +] + +[package.dependencies] +arrow = ">=0.15.0" + +[[package]] +name = "jedi" +version = "0.19.1" +description = "An autocompletion tool for Python that can be used for text editors." +optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, + {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, +] + +[package.dependencies] +parso = ">=0.8.3,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jiter" +version = "0.6.1" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.6.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d08510593cb57296851080018006dfc394070178d238b767b1879dc1013b106c"}, + {file = "jiter-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adef59d5e2394ebbad13b7ed5e0306cceb1df92e2de688824232a91588e77aa7"}, + {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3e02f7a27f2bcc15b7d455c9df05df8ffffcc596a2a541eeda9a3110326e7a3"}, + {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed69a7971d67b08f152c17c638f0e8c2aa207e9dd3a5fcd3cba294d39b5a8d2d"}, + {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2019d966e98f7c6df24b3b8363998575f47d26471bfb14aade37630fae836a1"}, + {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36c0b51a285b68311e207a76c385650322734c8717d16c2eb8af75c9d69506e7"}, + {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:220e0963b4fb507c525c8f58cde3da6b1be0bfddb7ffd6798fb8f2531226cdb1"}, + {file = "jiter-0.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa25c7a9bf7875a141182b9c95aed487add635da01942ef7ca726e42a0c09058"}, + {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e90552109ca8ccd07f47ca99c8a1509ced93920d271bb81780a973279974c5ab"}, + {file = 
"jiter-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:67723a011964971864e0b484b0ecfee6a14de1533cff7ffd71189e92103b38a8"}, + {file = "jiter-0.6.1-cp310-none-win32.whl", hash = "sha256:33af2b7d2bf310fdfec2da0177eab2fedab8679d1538d5b86a633ebfbbac4edd"}, + {file = "jiter-0.6.1-cp310-none-win_amd64.whl", hash = "sha256:7cea41c4c673353799906d940eee8f2d8fd1d9561d734aa921ae0f75cb9732f4"}, + {file = "jiter-0.6.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b03c24e7da7e75b170c7b2b172d9c5e463aa4b5c95696a368d52c295b3f6847f"}, + {file = "jiter-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47fee1be677b25d0ef79d687e238dc6ac91a8e553e1a68d0839f38c69e0ee491"}, + {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0d2f6e01a8a0fb0eab6d0e469058dab2be46ff3139ed2d1543475b5a1d8e7"}, + {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b809e39e342c346df454b29bfcc7bca3d957f5d7b60e33dae42b0e5ec13e027"}, + {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9ac7c2f092f231f5620bef23ce2e530bd218fc046098747cc390b21b8738a7a"}, + {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e51a2d80d5fe0ffb10ed2c82b6004458be4a3f2b9c7d09ed85baa2fbf033f54b"}, + {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3343d4706a2b7140e8bd49b6c8b0a82abf9194b3f0f5925a78fc69359f8fc33c"}, + {file = "jiter-0.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82521000d18c71e41c96960cb36e915a357bc83d63a8bed63154b89d95d05ad1"}, + {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3c843e7c1633470708a3987e8ce617ee2979ee18542d6eb25ae92861af3f1d62"}, + {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2e861658c3fe849efc39b06ebb98d042e4a4c51a8d7d1c3ddc3b1ea091d0784"}, + {file = 
"jiter-0.6.1-cp311-none-win32.whl", hash = "sha256:7d72fc86474862c9c6d1f87b921b70c362f2b7e8b2e3c798bb7d58e419a6bc0f"}, + {file = "jiter-0.6.1-cp311-none-win_amd64.whl", hash = "sha256:3e36a320634f33a07794bb15b8da995dccb94f944d298c8cfe2bd99b1b8a574a"}, + {file = "jiter-0.6.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1fad93654d5a7dcce0809aff66e883c98e2618b86656aeb2129db2cd6f26f867"}, + {file = "jiter-0.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4e6e340e8cd92edab7f6a3a904dbbc8137e7f4b347c49a27da9814015cc0420c"}, + {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:691352e5653af84ed71763c3c427cff05e4d658c508172e01e9c956dfe004aba"}, + {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:defee3949313c1f5b55e18be45089970cdb936eb2a0063f5020c4185db1b63c9"}, + {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26d2bdd5da097e624081c6b5d416d3ee73e5b13f1703bcdadbb1881f0caa1933"}, + {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18aa9d1626b61c0734b973ed7088f8a3d690d0b7f5384a5270cd04f4d9f26c86"}, + {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3567c8228afa5ddcce950631c6b17397ed178003dc9ee7e567c4c4dcae9fa0"}, + {file = "jiter-0.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c0507131c922defe3f04c527d6838932fcdfd69facebafd7d3574fa3395314"}, + {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:540fcb224d7dc1bcf82f90f2ffb652df96f2851c031adca3c8741cb91877143b"}, + {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2"}, + {file = "jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0"}, + {file = 
"jiter-0.6.1-cp312-none-win_amd64.whl", hash = "sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6"}, + {file = "jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31"}, + {file = "jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701"}, + {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3"}, + {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd"}, + {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c"}, + {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592"}, + {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885"}, + {file = "jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a"}, + {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71"}, + {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991"}, + {file = "jiter-0.6.1-cp313-none-win32.whl", hash = "sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4"}, + {file = "jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda"}, + {file = 
"jiter-0.6.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:31d8e00e1fb4c277df8ab6f31a671f509ebc791a80e5c61fdc6bc8696aaa297c"}, + {file = "jiter-0.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77c296d65003cd7ee5d7b0965f6acbe6cffaf9d1fa420ea751f60ef24e85fed5"}, + {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeeb0c0325ef96c12a48ea7e23e2e86fe4838e6e0a995f464cf4c79fa791ceeb"}, + {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a31c6fcbe7d6c25d6f1cc6bb1cba576251d32795d09c09961174fe461a1fb5bd"}, + {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59e2b37f3b9401fc9e619f4d4badcab2e8643a721838bcf695c2318a0475ae42"}, + {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bae5ae4853cb9644144e9d0755854ce5108d470d31541d83f70ca7ecdc2d1637"}, + {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df588e9c830b72d8db1dd7d0175af6706b0904f682ea9b1ca8b46028e54d6e9"}, + {file = "jiter-0.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15f8395e835cf561c85c1adee72d899abf2733d9df72e9798e6d667c9b5c1f30"}, + {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a99d4e0b5fc3b05ea732d67eb2092fe894e95a90e6e413f2ea91387e228a307"}, + {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a311df1fa6be0ccd64c12abcd85458383d96e542531bafbfc0a16ff6feda588f"}, + {file = "jiter-0.6.1-cp38-none-win32.whl", hash = "sha256:81116a6c272a11347b199f0e16b6bd63f4c9d9b52bc108991397dd80d3c78aba"}, + {file = "jiter-0.6.1-cp38-none-win_amd64.whl", hash = "sha256:13f9084e3e871a7c0b6e710db54444088b1dd9fbefa54d449b630d5e73bb95d0"}, + {file = "jiter-0.6.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f1c53615fcfec3b11527c08d19cff6bc870da567ce4e57676c059a3102d3a082"}, + {file = 
"jiter-0.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f791b6a4da23238c17a81f44f5b55d08a420c5692c1fda84e301a4b036744eb1"}, + {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c97e90fec2da1d5f68ef121444c2c4fa72eabf3240829ad95cf6bbeca42a301"}, + {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3cbc1a66b4e41511209e97a2866898733c0110b7245791ac604117b7fb3fedb7"}, + {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4e85f9e12cd8418ab10e1fcf0e335ae5bb3da26c4d13a0fd9e6a17a674783b6"}, + {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08be33db6dcc374c9cc19d3633af5e47961a7b10d4c61710bd39e48d52a35824"}, + {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:677be9550004f5e010d673d3b2a2b815a8ea07a71484a57d3f85dde7f14cf132"}, + {file = "jiter-0.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8bd065be46c2eecc328e419d6557bbc37844c88bb07b7a8d2d6c91c7c4dedc9"}, + {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bd95375ce3609ec079a97c5d165afdd25693302c071ca60c7ae1cf826eb32022"}, + {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db459ed22d0208940d87f614e1f0ea5a946d29a3cfef71f7e1aab59b6c6b2afb"}, + {file = "jiter-0.6.1-cp39-none-win32.whl", hash = "sha256:d71c962f0971347bd552940ab96aa42ceefcd51b88c4ced8a27398182efa8d80"}, + {file = "jiter-0.6.1-cp39-none-win_amd64.whl", hash = "sha256:d465db62d2d10b489b7e7a33027c4ae3a64374425d757e963f86df5b5f2e7fc5"}, + {file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"}, +] + +[[package]] +name = "joblib" +version = "1.4.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "joblib-1.4.2-py3-none-any.whl", hash = 
"sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "json5" +version = "0.9.25" +description = "A Python implementation of the JSON5 data format." +optional = false +python-versions = ">=3.8" +files = [ + {file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"}, + {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +description = "Apply JSON-Patches (RFC 6902)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, +] + +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +name = "jsonpointer" +version = "3.0.0" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, + {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, +] + +[[package]] +name = "jsonschema" +version = "4.23.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = 
"sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""} +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} +rpds-py = ">=0.7.1" +uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +webcolors = {version = ">=24.6.0", optional = true, markers = "extra == \"format-nongpl\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] + +[[package]] +name = "jsonschema-specifications" +version = "2024.10.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.9" +files = [ + {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, + {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "jupyter" +version = "1.1.1" +description = "Jupyter metapackage. Install all the Jupyter components in one go." 
+optional = false +python-versions = "*" +files = [ + {file = "jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83"}, + {file = "jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a"}, +] + +[package.dependencies] +ipykernel = "*" +ipywidgets = "*" +jupyter-console = "*" +jupyterlab = "*" +nbconvert = "*" +notebook = "*" + +[[package]] +name = "jupyter-client" +version = "8.6.3" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, +] + +[package.dependencies] +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-console" +version = "6.6.3" +description = "Jupyter terminal console" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"}, + {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"}, +] + +[package.dependencies] +ipykernel = ">=6.14" +ipython = "*" +jupyter-client = ">=7.0.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +prompt-toolkit = ">=3.0.30" +pygments = "*" 
+pyzmq = ">=17" +traitlets = ">=5.4" + +[package.extras] +test = ["flaky", "pexpect", "pytest"] + +[[package]] +name = "jupyter-core" +version = "5.7.2" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, + {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyter-events" +version = "0.10.0" +description = "Jupyter Event System library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"}, + {file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"}, +] + +[package.dependencies] +jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]} +python-json-logger = ">=2.0.4" +pyyaml = ">=5.3" +referencing = "*" +rfc3339-validator = "*" +rfc3986-validator = ">=0.1.1" +traitlets = ">=5.3" + +[package.extras] +cli = ["click", "rich"] +docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"] +test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"] + +[[package]] +name = "jupyter-lsp" +version = "2.2.5" +description = "Multi-Language Server 
WebSocket proxy for Jupyter Notebook/Lab server" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001"}, + {file = "jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da"}, +] + +[package.dependencies] +jupyter-server = ">=1.1.2" + +[[package]] +name = "jupyter-server" +version = "2.14.2" +description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_server-2.14.2-py3-none-any.whl", hash = "sha256:47ff506127c2f7851a17bf4713434208fc490955d0e8632e95014a9a9afbeefd"}, + {file = "jupyter_server-2.14.2.tar.gz", hash = "sha256:66095021aa9638ced276c248b1d81862e4c50f292d575920bbe960de1c56b12b"}, +] + +[package.dependencies] +anyio = ">=3.1.0" +argon2-cffi = ">=21.1" +jinja2 = ">=3.0.3" +jupyter-client = ">=7.4.4" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +jupyter-events = ">=0.9.0" +jupyter-server-terminals = ">=0.4.4" +nbconvert = ">=6.4.4" +nbformat = ">=5.3.0" +overrides = ">=5.0" +packaging = ">=22.0" +prometheus-client = ">=0.9" +pywinpty = {version = ">=2.0.1", markers = "os_name == \"nt\""} +pyzmq = ">=24" +send2trash = ">=1.8.2" +terminado = ">=0.8.3" +tornado = ">=6.2.0" +traitlets = ">=5.6.0" +websocket-client = ">=1.7" + +[package.extras] +docs = ["ipykernel", "jinja2", "jupyter-client", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"] +test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0,<9)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.7)", "pytest-timeout", "requests"] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.3" 
+description = "A Jupyter Server Extension Providing Terminals." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa"}, + {file = "jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269"}, +] + +[package.dependencies] +pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} +terminado = ">=0.8.3" + +[package.extras] +docs = ["jinja2", "jupyter-server", "mistune (<4.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] +test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] + +[[package]] +name = "jupyterlab" +version = "4.2.5" +description = "JupyterLab computational environment" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyterlab-4.2.5-py3-none-any.whl", hash = "sha256:73b6e0775d41a9fee7ee756c80f58a6bed4040869ccc21411dc559818874d321"}, + {file = "jupyterlab-4.2.5.tar.gz", hash = "sha256:ae7f3a1b8cb88b4f55009ce79fa7c06f99d70cd63601ee4aa91815d054f46f75"}, +] + +[package.dependencies] +async-lru = ">=1.0.0" +httpx = ">=0.25.0" +ipykernel = ">=6.5.0" +jinja2 = ">=3.0.3" +jupyter-core = "*" +jupyter-lsp = ">=2.0.0" +jupyter-server = ">=2.4.0,<3" +jupyterlab-server = ">=2.27.1,<3" +notebook-shim = ">=0.2" +packaging = "*" +setuptools = ">=40.1.0" +tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""} +tornado = ">=6.2.0" +traitlets = "*" + +[package.extras] +dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"] +docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"] +docs-screenshots = ["altair 
(==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"] +test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"] +upgrade-extension = ["copier (>=9,<10)", "jinja2-time (<0.3)", "pydantic (<3.0)", "pyyaml-include (<3.0)", "tomli-w (<2.0)"] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +description = "Pygments theme using JupyterLab CSS variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"}, + {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, +] + +[[package]] +name = "jupyterlab-server" +version = "2.27.3" +description = "A set of server components for JupyterLab and JupyterLab like applications." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"}, + {file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"}, +] + +[package.dependencies] +babel = ">=2.10" +jinja2 = ">=3.0.3" +json5 = ">=0.9.0" +jsonschema = ">=4.18.0" +jupyter-server = ">=1.21,<3" +packaging = ">=21.3" +requests = ">=2.31" + +[package.extras] +docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"] +openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"] +test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0,<8)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.13" +description = "Jupyter interactive widgets for JupyterLab" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyterlab_widgets-3.0.13-py3-none-any.whl", hash = "sha256:e3cda2c233ce144192f1e29914ad522b2f4c40e77214b0cc97377ca3d323db54"}, + {file = "jupyterlab_widgets-3.0.13.tar.gz", hash = "sha256:a2966d385328c1942b683a8cd96b89b8dd82c8b8f81dda902bb2bc06d46f5bed"}, +] + +[[package]] +name = "kiwisolver" +version = "1.4.7" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, 
+ {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = 
"sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, + {file = 
"kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, + {file = 
"kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, + {file 
= "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, + 
{file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, + {file = 
"kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, + {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, +] + +[[package]] +name = "langchain" +version = "0.3.3" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = 
"langchain-0.3.3-py3-none-any.whl", hash = "sha256:05ac98c674853c2386d043172820e37ceac9b913aaaf1e51217f0fc424112c72"}, + {file = "langchain-0.3.3.tar.gz", hash = "sha256:6435882996a029a60c61c356bbe51bab4a8f43a54210f5f03e3c4474d19d1842"}, +] + +[package.dependencies] +aiohttp = ">=3.8.3,<4.0.0" +async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} +langchain-core = ">=0.3.10,<0.4.0" +langchain-text-splitters = ">=0.3.0,<0.4.0" +langsmith = ">=0.1.17,<0.2.0" +numpy = [ + {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, + {version = ">=1,<2", markers = "python_version < \"3.12\""}, +] +pydantic = ">=2.7.4,<3.0.0" +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" + +[[package]] +name = "langchain-community" +version = "0.3.2" +description = "Community contributed LangChain integrations." +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "langchain_community-0.3.2-py3-none-any.whl", hash = "sha256:fffcd484c7674e81ceaa72a809962338bfb17ec8f9e0377ce4e9d884e6fe8ca5"}, + {file = "langchain_community-0.3.2.tar.gz", hash = "sha256:469bf5357a08c915cebc4c506dca4617eec737d82a9b6e340df5f3b814dc89bc"}, +] + +[package.dependencies] +aiohttp = ">=3.8.3,<4.0.0" +dataclasses-json = ">=0.5.7,<0.7" +langchain = ">=0.3.3,<0.4.0" +langchain-core = ">=0.3.10,<0.4.0" +langsmith = ">=0.1.125,<0.2.0" +numpy = [ + {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, + {version = ">=1,<2", markers = "python_version < \"3.12\""}, +] +pydantic-settings = ">=2.4.0,<3.0.0" +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" + +[[package]] +name = "langchain-core" +version = "0.3.10" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "langchain_core-0.3.10-py3-none-any.whl", hash = 
"sha256:146be6bf2d3dc0d6f4feb46ef082182cf57b056e8163d45278529cd7b7343d2f"}, + {file = "langchain_core-0.3.10.tar.gz", hash = "sha256:63b9a3d03b52dba29cc248b752c574cdcb5fb04bd0fc5c76097fcbb7aaba5221"}, +] + +[package.dependencies] +jsonpatch = ">=1.33,<2.0" +langsmith = ">=0.1.125,<0.2.0" +packaging = ">=23.2,<25" +pydantic = [ + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, + {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""}, +] +PyYAML = ">=5.3" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" +typing-extensions = ">=4.7" + +[[package]] +name = "langchain-google-community" +version = "2.0.1" +description = "An integration package connecting miscellaneous Google's products and LangChain" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "langchain_google_community-2.0.1-py3-none-any.whl", hash = "sha256:332ab8f046b3d7e83ac656568335694951a12ed3b793a8802c5a8cb5173e95ca"}, + {file = "langchain_google_community-2.0.1.tar.gz", hash = "sha256:683a93ea576dad081cfb85bfd395460be63dcb5ef184038f3b5122a0e3917093"}, +] + +[package.dependencies] +google-api-core = ">=2.17.1,<3.0.0" +google-api-python-client = ">=2.122.0,<3.0.0" +google-cloud-core = ">=2.4.1,<3.0.0" +google-cloud-discoveryengine = {version = ">=0.11.13,<0.12.0", optional = true, markers = "extra == \"vertexaisearch\""} +grpcio = ">=1.62.0,<2.0.0" +langchain-community = ">=0.3.0,<0.4.0" +langchain-core = ">=0.3.0,<0.4" + +[package.extras] +bigquery = ["google-cloud-bigquery (>=3.21.0,<4.0.0)"] +docai = ["gapic-google-longrunning (>=0.11.2,<0.12.0)", "google-cloud-contentwarehouse (>=0.7.7,<0.8.0)", "google-cloud-documentai (>=2.26.0,<3.0.0)", "google-cloud-documentai-toolbox (>=0.13.3a0,<0.14.0)"] +drive = ["google-auth-httplib2 (>=0.2.0,<0.3.0)", "google-auth-oauthlib (>=1.2.0,<2.0.0)"] +featurestore = ["db-dtypes (>=1.2.0,<2.0.0)", "google-cloud-aiplatform (>=1.56.0,<2.0.0)", "google-cloud-bigquery-storage (>=2.6.0,<3)", "pandas 
(>=1.0.0)", "pandas (>=2.0.0,<3.0)", "pyarrow (>=6.0.1)", "pydantic (>=2.7.4,<3.0.0)"] +gcs = ["google-cloud-storage (>=2.16.0,<3.0.0)"] +gmail = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "google-auth-httplib2 (>=0.2.0,<0.3.0)", "google-auth-oauthlib (>=1.2.0,<2.0.0)"] +places = ["googlemaps (>=4.10.0,<5.0.0)"] +speech = ["google-cloud-speech (>=2.26.0,<3.0.0)"] +texttospeech = ["google-cloud-texttospeech (>=2.16.3,<3.0.0)"] +translate = ["google-cloud-translate (>=3.15.3,<4.0.0)"] +vertexaisearch = ["google-cloud-discoveryengine (>=0.11.13,<0.12.0)"] +vision = ["google-cloud-vision (>=3.7.2,<4.0.0)"] + +[[package]] +name = "langchain-google-vertexai" +version = "2.0.3" +description = "An integration package connecting Google VertexAI and LangChain" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "langchain_google_vertexai-2.0.3-py3-none-any.whl", hash = "sha256:43835bed9f03f6969b3f8b73356c44d7898d209c69bd5124b0a80c35d8cebdd0"}, + {file = "langchain_google_vertexai-2.0.3.tar.gz", hash = "sha256:6f71061b578c0cd44fd5a147b61f66a1486bfc8b1dc69b4ac31e0f3c470d90d8"}, +] + +[package.dependencies] +google-cloud-aiplatform = ">=1.56.0,<2.0.0" +google-cloud-storage = ">=2.17.0,<3.0.0" +httpx = ">=0.27.0,<0.28.0" +httpx-sse = ">=0.4.0,<0.5.0" +langchain-core = ">=0.3.0,<0.4" +pydantic = ">=2,<3" + +[package.extras] +anthropic = ["anthropic[vertexai] (>=0.30.0,<1)"] +mistral = ["langchain-mistralai (>=0.2.0,<1)"] + +[[package]] +name = "langchain-text-splitters" +version = "0.3.0" +description = "LangChain text splitting utilities" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "langchain_text_splitters-0.3.0-py3-none-any.whl", hash = "sha256:e84243e45eaff16e5b776cd9c81b6d07c55c010ebcb1965deb3d1792b7358e83"}, + {file = "langchain_text_splitters-0.3.0.tar.gz", hash = "sha256:f9fe0b4d244db1d6de211e7343d4abc4aa90295aa22e1f0c89e51f33c55cd7ce"}, +] + +[package.dependencies] +langchain-core = ">=0.3.0,<0.4.0" + +[[package]] +name = 
"langgraph" +version = "0.2.35" +description = "Building stateful, multi-actor applications with LLMs" +optional = false +python-versions = "<4.0,>=3.9.0" +files = [ + {file = "langgraph-0.2.35-py3-none-any.whl", hash = "sha256:e9dfa85f05bee25b732f7e9ee26f8fc127e937d531762042c83d6c13f39898a3"}, + {file = "langgraph-0.2.35.tar.gz", hash = "sha256:707752f99c887570a797373584c7412f8c1d23dc6980b3e43e3341aa6122aca9"}, +] + +[package.dependencies] +langchain-core = ">=0.2.39,<0.4" +langgraph-checkpoint = ">=2.0.0,<3.0.0" + +[[package]] +name = "langgraph-checkpoint" +version = "2.0.1" +description = "Library with base interfaces for LangGraph checkpoint savers." +optional = false +python-versions = "<4.0.0,>=3.9.0" +files = [ + {file = "langgraph_checkpoint-2.0.1-py3-none-any.whl", hash = "sha256:760edb722f6c64f2a39f41c7fbd56aaee47524f3399cf7c4bb8f5563b590ee68"}, + {file = "langgraph_checkpoint-2.0.1.tar.gz", hash = "sha256:31c34952b11a93108d76e5ad05398bfc94d8aafda5b4da7d17c26a121acce8e0"}, +] + +[package.dependencies] +langchain-core = ">=0.2.38,<0.4" +msgpack = ">=1.1.0,<2.0.0" + +[[package]] +name = "langsmith" +version = "0.1.134" +description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
+optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langsmith-0.1.134-py3-none-any.whl", hash = "sha256:ada98ad80ef38807725f32441a472da3dd28394010877751f48f458d3289da04"}, + {file = "langsmith-0.1.134.tar.gz", hash = "sha256:23abee3b508875a0e63c602afafffc02442a19cfd88f9daae05b3e9054fd6b61"}, +] + +[package.dependencies] +httpx = ">=0.23.0,<1" +orjson = ">=3.9.14,<4.0.0" +pydantic = [ + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, + {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, +] +requests = ">=2,<3" +requests-toolbelt = ">=1.0.0,<2.0.0" + +[[package]] +name = "lxml" +version = "5.3.0" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." +optional = false +python-versions = ">=3.6" +files = [ + {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"}, + {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"}, + 
{file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"}, + {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"}, + {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"}, + {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"}, + {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"}, + {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"}, + {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"}, + 
{file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"}, + {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"}, + {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"}, + {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"}, + {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"}, + {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = 
"sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"}, + {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"}, + {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"}, + {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"}, + {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"}, + {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"}, + {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"}, + {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"}, + {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"}, + {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"}, + {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"}, + {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"}, + {file = 
"lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, + {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, + {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, + {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, + {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, + {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, + {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, + {file = 
"lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, + {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, + {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, + {file = 
"lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, + {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, +] + +[package.extras] +cssselect 
= ["cssselect (>=0.7)"] +html-clean = ["lxml-html-clean"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] +source = ["Cython (>=3.0.11)"] + +[[package]] +name = "markdown" +version = "3.7" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"}, + {file = "markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2"}, +] + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markdownlit" +version = "0.0.7" +description = "markdownlit adds a couple of lit Markdown capabilities to your Streamlit apps" 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "markdownlit-0.0.7-py3-none-any.whl", hash = "sha256:b58bb539dcb52e0b040ab2fed32f1f3146cbb2746dc3812940d9dd359c378bb6"}, + {file = "markdownlit-0.0.7.tar.gz", hash = "sha256:553e2db454e2be4567caebef5176c98a40a7e24f7ea9c2fe8a1f05c1d9ea4005"}, +] + +[package.dependencies] +favicon = "*" +htbuilder = "*" +lxml = "*" +markdown = "*" +pymdown-extensions = "*" +streamlit = "*" +streamlit-extras = "*" + +[[package]] +name = "markupsafe" +version = "3.0.1" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = 
"sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, + {file = 
"MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = 
"sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, + {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, +] + +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "matplotlib" +version = "3.9.2" +description = "Python plotting package" +optional = false +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, + {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, + {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, + {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, + {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, + {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, + {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, + {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, + {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, + {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, + {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, + {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, + {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, + {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, + {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, + {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, + {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, + {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash 
= "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, + {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, + {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, + {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, + {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, + {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, + {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, + {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, + {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, + {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, + {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, + {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, + {file = "matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"}, + {file = 
"matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"}, + {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"}, + {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"}, + {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"}, + {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"}, + {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +description = "Inline Matplotlib backend for Jupyter" +optional 
= false +python-versions = ">=3.8" +files = [ + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "mistune" +version = "3.0.2" +description = "A sane and fast Markdown parser with useful plugins and renderers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"}, + {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, +] + +[[package]] +name = "monotonic" +version = "1.6" +description = "An implementation of time.monotonic() for Python 2 & < 3.3" +optional = false +python-versions = "*" +files = [ + {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, + {file = "monotonic-1.6.tar.gz", hash = 
"sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, +] + +[[package]] +name = "more-itertools" +version = "10.5.0" +description = "More routines for operating on iterables, beyond itertools" +optional = false +python-versions = ">=3.8" +files = [ + {file = "more-itertools-10.5.0.tar.gz", hash = "sha256:5482bfef7849c25dc3c6dd53a6173ae4795da2a41a80faea6700d9f5846c5da6"}, + {file = "more_itertools-10.5.0-py3-none-any.whl", hash = "sha256:037b0d3203ce90cca8ab1defbbdac29d5f993fc20131f3664dc8d6acfa872aef"}, +] + +[[package]] +name = "msgpack" +version = "1.1.0" +description = "MessagePack serializer" +optional = false +python-versions = ">=3.8" +files = [ + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, + {file = 
"msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, + {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, + {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, + {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, + {file = 
"msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, + {file = 
"msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, + {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, + {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, + {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, + {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, + {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, + {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, +] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = 
"multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + 
{file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, 
+ {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = 
"multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "mypy" +version = "1.11.2" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, + {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, + {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, + {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, + {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, + {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, + {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, + {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, + {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, + {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, + {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, + {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, + {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, + {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, + {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, + {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, + {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, + {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = 
"1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "narwhals" +version = "1.9.3" +description = "Extremely lightweight compatibility layer between dataframe libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "narwhals-1.9.3-py3-none-any.whl", hash = "sha256:598d09227bef31f42ec25840728f3113e7e37c30d5430ae8126a1c8bd58177f9"}, + {file = "narwhals-1.9.3.tar.gz", hash = "sha256:5b3f52c02a86730a611497e6ffbaccaa3df86fc26470c23e3fb40dc2bc793ebb"}, +] + +[package.extras] +cudf = ["cudf (>=23.08.00)"] +dask = ["dask[dataframe] (>=2024.7)"] +modin = ["modin"] +pandas = ["pandas (>=0.25.3)"] +polars = ["polars (>=0.20.3)"] +pyarrow = ["pyarrow (>=11.0.0)"] + +[[package]] +name = "nbclient" +version = "0.10.0" +description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"}, + {file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"}, +] + +[package.dependencies] +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +nbformat = ">=5.1" +traitlets = ">=5.4" + +[package.extras] +dev = ["pre-commit"] +docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] + +[[package]] +name = "nbconvert" +version = "7.16.4" +description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"}, + {file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +bleach = "!=5.0.0" +defusedxml = "*" +jinja2 = ">=3.0" +jupyter-core = ">=4.7" +jupyterlab-pygments = "*" +markupsafe = ">=2.0" +mistune = ">=2.0.3,<4" +nbclient = ">=0.5.0" +nbformat = ">=5.7" +packaging = "*" +pandocfilters = ">=1.4.1" +pygments = ">=2.4.1" +tinycss2 = "*" +traitlets = ">=5.1" + +[package.extras] +all = ["flaky", "ipykernel", "ipython", "ipywidgets (>=7.5)", "myst-parser", "nbsphinx (>=0.2.12)", "playwright", "pydata-sphinx-theme", "pyqtwebengine (>=5.15)", "pytest (>=7)", "sphinx (==5.0.2)", "sphinxcontrib-spelling", "tornado (>=6.1)"] +docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] +qtpdf = ["pyqtwebengine (>=5.15)"] +qtpng = ["pyqtwebengine (>=5.15)"] +serve = ["tornado (>=6.1)"] +test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest (>=7)"] +webpdf = ["playwright"] + +[[package]] +name = "nbformat" +version = "5.10.4" +description = "The Jupyter Notebook format" +optional = false +python-versions = ">=3.8" +files = [ + {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, + {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, +] + +[package.dependencies] +fastjsonschema = ">=2.15" +jsonschema = ">=2.6" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + 
+[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + +[[package]] +name = "notebook" +version = "7.2.2" +description = "Jupyter Notebook - A web-based notebook environment for interactive computing" +optional = false +python-versions = ">=3.8" +files = [ + {file = "notebook-7.2.2-py3-none-any.whl", hash = "sha256:c89264081f671bc02eec0ed470a627ed791b9156cad9285226b31611d3e9fe1c"}, + {file = "notebook-7.2.2.tar.gz", hash = "sha256:2ef07d4220421623ad3fe88118d687bc0450055570cdd160814a59cf3a1c516e"}, +] + +[package.dependencies] +jupyter-server = ">=2.4.0,<3" +jupyterlab = ">=4.2.0,<4.3" +jupyterlab-server = ">=2.27.1,<3" +notebook-shim = ">=0.2,<0.3" +tornado = ">=6.2.0" + +[package.extras] +dev = ["hatch", "pre-commit"] +docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +description = "A shim layer for notebook traits and config" +optional = false +python-versions = ">=3.7" +files = [ + {file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"}, + {file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"}, +] + +[package.dependencies] +jupyter-server = ">=1.8,<3" + +[package.extras] +test = ["pytest", 
"pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] + +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = 
"numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "opentelemetry-api" +version = "1.27.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, + {file = "opentelemetry_api-1.27.0.tar.gz", hash = 
"sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=8.4.0" + +[[package]] +name = "opentelemetry-exporter-gcp-trace" +version = "1.7.0" +description = "Google Cloud Trace exporter for OpenTelemetry" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_gcp_trace-1.7.0-py3-none-any.whl", hash = "sha256:6fd66256a0a7feb3d87334ec7981f2a477c63cb6d7696b1bc603f105293b06db"}, + {file = "opentelemetry_exporter_gcp_trace-1.7.0.tar.gz", hash = "sha256:2752dacdd3fdbfd73079d5ac58fc68179758e76535690d479de2bfbe03646583"}, +] + +[package.dependencies] +google-cloud-trace = ">=1.1,<2.0" +opentelemetry-api = ">=1.0,<2.0" +opentelemetry-resourcedetector-gcp = ">=1.5.0dev0,<2.dev0" +opentelemetry-sdk = ">=1.0,<2.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.27.0" +description = "OpenTelemetry Protobuf encoding" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8"}, +] + +[package.dependencies] +opentelemetry-proto = "1.27.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.27.0" +description = "OpenTelemetry Collector Protobuf over gRPC Exporter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" 
+googleapis-common-protos = ">=1.52,<2.0" +grpcio = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.27.0" +opentelemetry-proto = "1.27.0" +opentelemetry-sdk = ">=1.27.0,<1.28.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.27.0" +description = "OpenTelemetry Collector Protobuf over HTTP Exporter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_exporter_otlp_proto_http-1.27.0-py3-none-any.whl", hash = "sha256:688027575c9da42e179a69fe17e2d1eba9b14d81de8d13553a21d3114f3b4d75"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.27.0.tar.gz", hash = "sha256:2103479092d8eb18f61f3fbff084f67cc7f2d4a7d37e75304b8b56c1d09ebef5"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +googleapis-common-protos = ">=1.52,<2.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.27.0" +opentelemetry-proto = "1.27.0" +opentelemetry-sdk = ">=1.27.0,<1.28.0" +requests = ">=2.7,<3.0" + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.48b0" +description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"}, + {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.4,<2.0" +setuptools = ">=16.0" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name = "opentelemetry-instrumentation-alephalpha" +version = "0.30.1" +description = "OpenTelemetry Aleph Alpha instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_alephalpha-0.30.1-py3-none-any.whl", hash = 
"sha256:bd48845eea45c4469e1d3b9d02b74ff34bf338d3ad03c7ee8f355b33cb4ea2c5"}, + {file = "opentelemetry_instrumentation_alephalpha-0.30.1.tar.gz", hash = "sha256:8254e8420567a63c94de72815d0ed6a7a50080b45f47b79c691ef0ae743e7c81"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-anthropic" +version = "0.30.1" +description = "OpenTelemetry Anthropic instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_anthropic-0.30.1-py3-none-any.whl", hash = "sha256:ae0724448a49d2228f28d7350e9ecaa34cad22cebb2a4d1c62a1772c480669b9"}, + {file = "opentelemetry_instrumentation_anthropic-0.30.1.tar.gz", hash = "sha256:51073978b449da9accae8fe78c42059471cdb51f91371858fb0d348571f769fa"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-bedrock" +version = "0.30.1" +description = "OpenTelemetry Bedrock instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_bedrock-0.30.1-py3-none-any.whl", hash = "sha256:3d81544e91e83a2eaf70ed3b370061712bfa32bfc026b230a73c1d5405d42b3a"}, + {file = "opentelemetry_instrumentation_bedrock-0.30.1.tar.gz", hash = "sha256:af3f77801d857c4f542457442f756ae0d5ebc718941ab9090cfa83d4831bca9b"}, +] + +[package.dependencies] +anthropic = ">=0.17.0" +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-chromadb" 
+version = "0.30.1" +description = "OpenTelemetry Chroma DB instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_chromadb-0.30.1-py3-none-any.whl", hash = "sha256:574f941e0f43b19157ae6a8b41b3c619e3c0b2823e86d4e1aab8682e08403e33"}, + {file = "opentelemetry_instrumentation_chromadb-0.30.1.tar.gz", hash = "sha256:d2e1528322fe0021dae706349bb83f9cdfa3a270d69aaa08ab6ffcb7957c8f0a"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-cohere" +version = "0.30.1" +description = "OpenTelemetry Cohere instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_cohere-0.30.1-py3-none-any.whl", hash = "sha256:afbec2c5f15a557c3944a4c5b39a00221add361c22e6ede209a01794812684be"}, + {file = "opentelemetry_instrumentation_cohere-0.30.1.tar.gz", hash = "sha256:dbd9233be8619ae9ff9e15c76ee52feca52b23d5fda1383bb36a53c0c9ef2d52"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-google-generativeai" +version = "0.30.1" +description = "OpenTelemetry Google Generative AI instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_google_generativeai-0.30.1-py3-none-any.whl", hash = "sha256:44b1ce81ff254326408eb4150456907d3dcfef0100614ef95a90670d9836c0e2"}, + {file = "opentelemetry_instrumentation_google_generativeai-0.30.1.tar.gz", hash = "sha256:96cc1c46dd3b4dd405d9e72a54156f55c8958adb07e54369ef6022c440c7d92b"}, +] + +[package.dependencies] +opentelemetry-api = 
">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-groq" +version = "0.30.1" +description = "OpenTelemetry Groq instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_groq-0.30.1-py3-none-any.whl", hash = "sha256:fd44b8eb183ad3083d2afa19e1e0a9f29ba9cfe1c9c8ca50867bccbca1bc8549"}, + {file = "opentelemetry_instrumentation_groq-0.30.1.tar.gz", hash = "sha256:48ddb421871b725be6fc19e79b622497d0a45598bddfd1fdf3481ac2a64c3523"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-haystack" +version = "0.30.1" +description = "OpenTelemetry Haystack instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_haystack-0.30.1-py3-none-any.whl", hash = "sha256:812d404eede5ae66ce711c2808d48e122fe96657ab3e90567f0430ea5886c844"}, + {file = "opentelemetry_instrumentation_haystack-0.30.1.tar.gz", hash = "sha256:583f3bc9b070b6da9fcf7d66041e886d7c66af7b49c1156966a7840a3f825335"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-lancedb" +version = "0.30.1" +description = "OpenTelemetry Lancedb instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_lancedb-0.30.1-py3-none-any.whl", hash = "sha256:2050ec49174bfbe2d13323a5ebba78ba0053df639e976d312477324541d0b0c2"}, + {file = 
"opentelemetry_instrumentation_lancedb-0.30.1.tar.gz", hash = "sha256:5802b438f4ace54cf21ef7610607dda778f05a96e95f1863748df0931ab6db70"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-langchain" +version = "0.30.1" +description = "OpenTelemetry Langchain instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_langchain-0.30.1-py3-none-any.whl", hash = "sha256:0d7cf8332a30a0bfe127a4685c3707b9a9deaaa70929521488ae5ef43df6eba0"}, + {file = "opentelemetry_instrumentation_langchain-0.30.1.tar.gz", hash = "sha256:254b6e6c0e4ad7258f7790694aa6f724cd85928870fcaed568d55b6c38a6f5e3"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-llamaindex" +version = "0.30.1" +description = "OpenTelemetry LlamaIndex instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_llamaindex-0.30.1-py3-none-any.whl", hash = "sha256:e709dff9dacddd4a62b3aaf5eae7dfeb167532c037b5a9dd2580661e367aa06b"}, + {file = "opentelemetry_instrumentation_llamaindex-0.30.1.tar.gz", hash = "sha256:a61479a4eed9aba81f3b5546ef1b38e4ca111ceb689d092dea9f629678eb98a7"}, +] + +[package.dependencies] +inflection = ">=0.5.1,<0.6.0" +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-marqo" +version = "0.30.1" +description = "OpenTelemetry Marqo instrumentation" 
+optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_marqo-0.30.1-py3-none-any.whl", hash = "sha256:a22ef1fec03011575a82ded0e79663a76715c351123a54ab1990696451b7447e"}, + {file = "opentelemetry_instrumentation_marqo-0.30.1.tar.gz", hash = "sha256:885546ea7cb14a4248441c1ac5ba085aa55360c838f4976e3d36eedf150e1023"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-milvus" +version = "0.30.1" +description = "OpenTelemetry Milvus instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_milvus-0.30.1-py3-none-any.whl", hash = "sha256:b19e7dda34d77c02064e25a9a50d1e8c35b7768d8754455ee490dc2a23438a7f"}, + {file = "opentelemetry_instrumentation_milvus-0.30.1.tar.gz", hash = "sha256:0dc40e392c9b711e7b71ea9b2fb0b91059f80c9b76e26e44885d01931c834ef3"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-mistralai" +version = "0.30.1" +description = "OpenTelemetry Mistral AI instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_mistralai-0.30.1-py3-none-any.whl", hash = "sha256:2f1f8ccc2659747a4ce10326298ad2abf956341f90f9f90e35985414071a5fb8"}, + {file = "opentelemetry_instrumentation_mistralai-0.30.1.tar.gz", hash = "sha256:99162a2e90397fe19117b9589742e610e19880355694e2d8ff0b18dd96b5d141"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" 
+opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-ollama" +version = "0.30.1" +description = "OpenTelemetry Ollama instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_ollama-0.30.1-py3-none-any.whl", hash = "sha256:d654bf58c800c33da14c916f4998265b8b962290c862e6dcfd820ebc4b61f797"}, + {file = "opentelemetry_instrumentation_ollama-0.30.1.tar.gz", hash = "sha256:6aa7e4da2fd56ff110a8ddd4c3d96a4590764c2ae8876bcb7ad6830023f7865b"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[package.extras] +instruments = ["ollama (>=0.2.0,<0.3.0)"] + +[[package]] +name = "opentelemetry-instrumentation-openai" +version = "0.30.1" +description = "OpenTelemetry OpenAI instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_openai-0.30.1-py3-none-any.whl", hash = "sha256:da066a20724e66a7dc7f2af12d694687d4f9c1c8baaadb3b40f4755d45da9bde"}, + {file = "opentelemetry_instrumentation_openai-0.30.1.tar.gz", hash = "sha256:7dbd15b5fbe9f855a47796871eaf7a55de18dbd87e823ec1e23f879fe4f2f25f"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" +tiktoken = ">=0.6.0,<1" + +[[package]] +name = "opentelemetry-instrumentation-pinecone" +version = "0.30.1" +description = "OpenTelemetry Pinecone instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_pinecone-0.30.1-py3-none-any.whl", hash = "sha256:aa09476946e3595cf506b803c3d32d8e8effe038e7b621356c2fe27aa52f1651"}, + {file = 
"opentelemetry_instrumentation_pinecone-0.30.1.tar.gz", hash = "sha256:c69e5d4fb20ede116ee13aae3f7a962f9c044997ab63f9c7d15f78d27ceb2091"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-qdrant" +version = "0.30.1" +description = "OpenTelemetry Qdrant instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_qdrant-0.30.1-py3-none-any.whl", hash = "sha256:5cbee504e871824204f6c122b8ed88d20c815f0f9fdf48b04ec8600269ed7990"}, + {file = "opentelemetry_instrumentation_qdrant-0.30.1.tar.gz", hash = "sha256:baa29435e447e1b4b599b233149a25d215aec79a8599ef255dc52643dc2c0042"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-replicate" +version = "0.30.1" +description = "OpenTelemetry Replicate instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_replicate-0.30.1-py3-none-any.whl", hash = "sha256:879bd72c82f00b49b6a9e1f3089a468ef8c74cc3004a12d73590cad3aa33217b"}, + {file = "opentelemetry_instrumentation_replicate-0.30.1.tar.gz", hash = "sha256:bbbc1ccbf94d36b1e09a34086ba9260e1746b666ec3d58186169ef8c2676287f"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-requests" +version = "0.48b0" +description = "OpenTelemetry requests instrumentation" +optional = false +python-versions = ">=3.8" 
+files = [ + {file = "opentelemetry_instrumentation_requests-0.48b0-py3-none-any.whl", hash = "sha256:d4f01852121d0bd4c22f14f429654a735611d4f7bf3cf93f244bdf1489b2233d"}, + {file = "opentelemetry_instrumentation_requests-0.48b0.tar.gz", hash = "sha256:67ab9bd877a0352ee0db4616c8b4ae59736ddd700c598ed907482d44f4c9a2b3"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.48b0" +opentelemetry-semantic-conventions = "0.48b0" +opentelemetry-util-http = "0.48b0" + +[package.extras] +instruments = ["requests (>=2.0,<3.0)"] + +[[package]] +name = "opentelemetry-instrumentation-sqlalchemy" +version = "0.48b0" +description = "OpenTelemetry SQLAlchemy instrumentation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_sqlalchemy-0.48b0-py3-none-any.whl", hash = "sha256:625848a34aa5770cb4b1dcdbd95afce4307a0230338711101325261d739f391f"}, + {file = "opentelemetry_instrumentation_sqlalchemy-0.48b0.tar.gz", hash = "sha256:dbf2d5a755b470e64e5e2762b56f8d56313787e4c7d71a87fe25c33f48eb3493"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.48b0" +opentelemetry-semantic-conventions = "0.48b0" +packaging = ">=21.0" +wrapt = ">=1.11.2" + +[package.extras] +instruments = ["sqlalchemy"] + +[[package]] +name = "opentelemetry-instrumentation-threading" +version = "0.48b0" +description = "Thread context propagation support for OpenTelemetry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_threading-0.48b0-py3-none-any.whl", hash = "sha256:e81cb3a5342bbbc3f40b4c3f5180629905d504e2f364dc436ecb1123491f4080"}, + {file = "opentelemetry_instrumentation_threading-0.48b0.tar.gz", hash = "sha256:daef8a6fd06aa8b35594582d96ffb30954c4a9ae1ffdace7b00d0904fd650d2e"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.48b0" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name 
= "opentelemetry-instrumentation-together" +version = "0.30.1" +description = "OpenTelemetry Together AI instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_together-0.30.1-py3-none-any.whl", hash = "sha256:004a3d885acd562a8e6b36da77619664b8d6b5c865043e11c61334005c8901b1"}, + {file = "opentelemetry_instrumentation_together-0.30.1.tar.gz", hash = "sha256:890c8b02f2b45776912759d2cb0efa966b5fe6da642bb8c820ddeb6c890d2126"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-transformers" +version = "0.30.1" +description = "OpenTelemetry transformers instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_transformers-0.30.1-py3-none-any.whl", hash = "sha256:70faaba8dbc145f71e717ab9f10605b7b65ebc9317a6379b025fca47dca3723d"}, + {file = "opentelemetry_instrumentation_transformers-0.30.1.tar.gz", hash = "sha256:f3767ec5842dce311dd9a23009ec836c61a3ba1c3c667fd6d1a6ebaa0a73dd89"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-urllib3" +version = "0.48b0" +description = "OpenTelemetry urllib3 instrumentation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_instrumentation_urllib3-0.48b0-py3-none-any.whl", hash = "sha256:3ba2b874d798996a105fcb887491ecf78c1c47dc39516c8544680b2e32fc8d18"}, + {file = "opentelemetry_instrumentation_urllib3-0.48b0.tar.gz", hash = "sha256:6b03d6ee9b6e001cc73bb07ccf71bc42886eb006885ff6d53b5b00751bb01326"}, +] + +[package.dependencies] 
+opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.48b0" +opentelemetry-semantic-conventions = "0.48b0" +opentelemetry-util-http = "0.48b0" +wrapt = ">=1.0.0,<2.0.0" + +[package.extras] +instruments = ["urllib3 (>=1.0.0,<3.0.0)"] + +[[package]] +name = "opentelemetry-instrumentation-vertexai" +version = "0.30.1" +description = "OpenTelemetry Vertex AI instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_vertexai-0.30.1-py3-none-any.whl", hash = "sha256:b8e15e5062b29684337e3dd631ec94a581743f0ab4f059777c009b36646d5b08"}, + {file = "opentelemetry_instrumentation_vertexai-0.30.1.tar.gz", hash = "sha256:776a4c1e753134ff5ed678ed1805f139e09538ef2be0b549f0c413de314af555"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-watsonx" +version = "0.30.1" +description = "OpenTelemetry IBM Watsonx Instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_watsonx-0.30.1-py3-none-any.whl", hash = "sha256:43e82de6004045351d1fcccbcd1a0b76076518193a6039a712130a430282d139"}, + {file = "opentelemetry_instrumentation_watsonx-0.30.1.tar.gz", hash = "sha256:857d0cae0183122a841905a62a5904c670af4e490fd7a5863c0a573f70e7783c"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-instrumentation-weaviate" +version = "0.30.1" +description = "OpenTelemetry Weaviate instrumentation" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_instrumentation_weaviate-0.30.1-py3-none-any.whl", hash = 
"sha256:3af8c67b9c1e9215699b471611bb71f720f7073c88b492118727c18c6ab58c7a"}, + {file = "opentelemetry_instrumentation_weaviate-0.30.1.tar.gz", hash = "sha256:bdafd201da19c7abceec67c4e951eea104a88670af69dcd27404794b04dacfbb"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.1" + +[[package]] +name = "opentelemetry-proto" +version = "1.27.0" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace"}, + {file = "opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6"}, +] + +[package.dependencies] +protobuf = ">=3.19,<5.0" + +[[package]] +name = "opentelemetry-resourcedetector-gcp" +version = "1.7.0a0" +description = "Google Cloud resource detector for OpenTelemetry" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_resourcedetector_gcp-1.7.0a0-py3-none-any.whl", hash = "sha256:f04821cb79e0d165b2678f115f09ab419311e7f070286c742ceef2cdf5bc0790"}, + {file = "opentelemetry_resourcedetector_gcp-1.7.0a0.tar.gz", hash = "sha256:70e49a9ec282089cae60c450fc9fa1bb63eafac2e1747acb9603574f430e3666"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.0,<2.0" +opentelemetry-sdk = ">=1.0,<2.0" +requests = ">=2.24,<3.0" +typing-extensions = ">=4.0,<5.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.27.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, + {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = 
"sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, +] + +[package.dependencies] +opentelemetry-api = "1.27.0" +opentelemetry-semantic-conventions = "0.48b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.48b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, + {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.27.0" + +[[package]] +name = "opentelemetry-semantic-conventions-ai" +version = "0.4.1" +description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, +] + +[[package]] +name = "opentelemetry-util-http" +version = "0.48b0" +description = "Web util for OpenTelemetry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_util_http-0.48b0-py3-none-any.whl", hash = "sha256:76f598af93aab50328d2a69c786beaedc8b6a7770f7a818cc307eb353debfffb"}, + {file = "opentelemetry_util_http-0.48b0.tar.gz", hash = "sha256:60312015153580cc20f322e5cdc3d3ecad80a71743235bdb77716e742814623c"}, +] + +[[package]] +name = "orjson" +version = "3.10.7" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, + {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, + {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, + {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, + {file = 
"orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, + {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, + {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, + {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, + {file = 
"orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, + {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, + {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, + {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, + {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, + {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, + {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, + {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, + {file 
= "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, + {file = "orjson-3.10.7-cp38-none-win32.whl", hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, + {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, + {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, + {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, + {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, + {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, +] + +[[package]] +name = "overrides" +version = "7.7.0" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, + {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pandas" +version = "2.2.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = 
"pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = 
"pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = 
"pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib 
(>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pandocfilters" +version = "1.5.1" +description = "Utilities for writing pandoc filters in python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = 
"pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, + {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, +] + +[[package]] +name = "parso" +version = "0.8.4" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pillow" +version = "10.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + 
{file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = 
"pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = 
"pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "plotly" +version = "5.24.1" +description = "An open-source, interactive data visualization library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, + {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, +] + +[package.dependencies] +packaging = "*" +tenacity = ">=6.2.0" + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "posthog" +version = "3.7.0" +description = "Integrate PostHog into any python application." 
+optional = false +python-versions = "*" +files = [ + {file = "posthog-3.7.0-py2.py3-none-any.whl", hash = "sha256:3555161c3a9557b5666f96d8e1f17f410ea0f07db56e399e336a1656d4e5c722"}, + {file = "posthog-3.7.0.tar.gz", hash = "sha256:b095d4354ba23f8b346ab5daed8ecfc5108772f922006982dfe8b2d29ebc6e0e"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +monotonic = ">=1.5" +python-dateutil = ">2.1" +requests = ">=2.7,<3.0" +six = ">=1.5" + +[package.extras] +dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"] +sentry = ["django", "sentry-sdk"] +test = ["coverage", "django", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest", "pytest-timeout"] + +[[package]] +name = "prometheus-client" +version = "0.21.0" +description = "Python client for the Prometheus monitoring system." +optional = false +python-versions = ">=3.8" +files = [ + {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, + {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, +] + +[package.extras] +twisted = ["twisted"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.48" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "propcache" +version = "0.2.0" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.8" +files = [ + {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, + {file = 
"propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, + {file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"}, + {file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"}, + {file = "propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc2db02409338bf36590aa985a461b2c96fce91f8e7e0f14c50c5fcc4f229016"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6ed8db0a556343d566a5c124ee483ae113acc9a557a807d439bcecc44e7dfbb"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91997d9cb4a325b60d4e3f20967f8eb08dfcb32b22554d5ef78e6fd1dda743a2"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7dde9e533c0a49d802b4f3f218fa9ad0a1ce21f2c2eb80d5216565202acab4"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcad6c564fe6b9b8916c1aefbb37a362deebf9394bd2974e9d84232e3e08504"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:97a58a28bcf63284e8b4d7b460cbee1edaab24634e82059c7b8c09e65284f178"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:945db8ee295d3af9dbdbb698cce9bbc5c59b5c3fe328bbc4387f59a8a35f998d"}, + {file = 
"propcache-0.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39e104da444a34830751715f45ef9fc537475ba21b7f1f5b0f4d71a3b60d7fe2"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c5ecca8f9bab618340c8e848d340baf68bcd8ad90a8ecd7a4524a81c1764b3db"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c436130cc779806bdf5d5fae0d848713105472b8566b75ff70048c47d3961c5b"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:191db28dc6dcd29d1a3e063c3be0b40688ed76434622c53a284e5427565bbd9b"}, + {file = "propcache-0.2.0-cp311-cp311-win32.whl", hash = "sha256:5f2564ec89058ee7c7989a7b719115bdfe2a2fb8e7a4543b8d1c0cc4cf6478c1"}, + {file = "propcache-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e2e54267980349b723cff366d1e29b138b9a60fa376664a157a342689553f71"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"}, + {file = 
"propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"}, + {file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"}, + {file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a"}, + {file = 
"propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544"}, + {file = "propcache-0.2.0-cp313-cp313-win32.whl", hash = "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032"}, + {file = "propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:53d1bd3f979ed529f0805dd35ddaca330f80a9a6d90bc0121d2ff398f8ed8861"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:83928404adf8fb3d26793665633ea79b7361efa0287dfbd372a7e74311d51ee6"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77a86c261679ea5f3896ec060be9dc8e365788248cc1e049632a1be682442063"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218db2a3c297a3768c11a34812e63b3ac1c3234c3a086def9c0fee50d35add1f"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7735e82e3498c27bcb2d17cb65d62c14f1100b71723b68362872bca7d0913d90"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20a617c776f520c3875cf4511e0d1db847a076d720714ae35ffe0df3e440be68"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b69535c870670c9f9b14a75d28baa32221d06f6b6fa6f77a0a13c5a7b0a5b9"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4569158070180c3855e9c0791c56be3ceeb192defa2cdf6a3f39e54319e56b89"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:db47514ffdbd91ccdc7e6f8407aac4ee94cc871b15b577c1c324236b013ddd04"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:2a60ad3e2553a74168d275a0ef35e8c0a965448ffbc3b300ab3a5bb9956c2162"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:662dd62358bdeaca0aee5761de8727cfd6861432e3bb828dc2a693aa0471a563"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:25a1f88b471b3bc911d18b935ecb7115dff3a192b6fef46f0bfaf71ff4f12418"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f60f0ac7005b9f5a6091009b09a419ace1610e163fa5deaba5ce3484341840e7"}, + {file = 
"propcache-0.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:74acd6e291f885678631b7ebc85d2d4aec458dd849b8c841b57ef04047833bed"}, + {file = "propcache-0.2.0-cp38-cp38-win32.whl", hash = "sha256:d9b6ddac6408194e934002a69bcaadbc88c10b5f38fb9307779d1c629181815d"}, + {file = "propcache-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:676135dcf3262c9c5081cc8f19ad55c8a64e3f7282a21266d05544450bffc3a5"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:25c8d773a62ce0451b020c7b29a35cfbc05de8b291163a7a0f3b7904f27253e6"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:375a12d7556d462dc64d70475a9ee5982465fbb3d2b364f16b86ba9135793638"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ec43d76b9677637a89d6ab86e1fef70d739217fefa208c65352ecf0282be957"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45eec587dafd4b2d41ac189c2156461ebd0c1082d2fe7013571598abb8505d1"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc092ba439d91df90aea38168e11f75c655880c12782facf5cf9c00f3d42b562"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa1076244f54bb76e65e22cb6910365779d5c3d71d1f18b275f1dfc7b0d71b4d"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:682a7c79a2fbf40f5dbb1eb6bfe2cd865376deeac65acf9beb607505dced9e12"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e40876731f99b6f3c897b66b803c9e1c07a989b366c6b5b475fafd1f7ba3fb8"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:363ea8cd3c5cb6679f1c2f5f1f9669587361c062e4899fce56758efa928728f8"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = 
"sha256:140fbf08ab3588b3468932974a9331aff43c0ab8a2ec2c608b6d7d1756dbb6cb"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e70fac33e8b4ac63dfc4c956fd7d85a0b1139adcfc0d964ce288b7c527537fea"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b33d7a286c0dc1a15f5fc864cc48ae92a846df287ceac2dd499926c3801054a6"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f6d5749fdd33d90e34c2efb174c7e236829147a2713334d708746e94c4bde40d"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22aa8f2272d81d9317ff5756bb108021a056805ce63dd3630e27d042c8092798"}, + {file = "propcache-0.2.0-cp39-cp39-win32.whl", hash = "sha256:73e4b40ea0eda421b115248d7e79b59214411109a5bc47d0d48e4c73e3b8fcf9"}, + {file = "propcache-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:9517d5e9e0731957468c29dbfd0f976736a0e55afaea843726e887f36fe017df"}, + {file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"}, + {file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"}, +] + +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "4.25.5" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, + {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, + {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, + {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, + {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, + {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, + {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, + {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, + {file = "protobuf-4.25.5.tar.gz", hash = 
"sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, +] + +[[package]] +name = "psutil" +version = "6.0.0" +description = "Cross-platform lib for process and system monitoring in Python." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, + {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, + {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, + {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, + {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, + {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, + {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, + {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, + {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, + {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +files = [ + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "pyarrow" +version = "17.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, + {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, + 
{file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, + {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, + {file = 
"pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, + {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, + {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, + {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, +] + +[package.dependencies] +numpy = ">=1.16.6" + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = 
"sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pycodestyle" +version = "2.12.1" +description = "Python style guide checker" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3"}, + {file = "pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521"}, +] + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.9.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" +typing-extensions = [ + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers 
= "python_version < \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.23.4" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = 
"pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = 
"sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = 
"sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = 
"sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + 
{file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = 
"sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = 
"pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.5.2" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_settings-2.5.2-py3-none-any.whl", hash = "sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907"}, + {file = "pydantic_settings-2.5.2.tar.gz", hash = "sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" + +[package.extras] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pydeck" +version = "0.9.1" +description = "Widget for deck.gl maps" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038"}, + {file = "pydeck-0.9.1.tar.gz", 
hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605"}, +] + +[package.dependencies] +jinja2 = ">=2.10.1" +numpy = ">=1.16.4" + +[package.extras] +carto = ["pydeck-carto"] +jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "traitlets (>=4.3.2)"] + +[[package]] +name = "pyflakes" +version = "3.2.0" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, + {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, +] + +[[package]] +name = "pygments" +version = "2.18.0" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymdown-extensions" +version = "10.11.2" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pymdown_extensions-10.11.2-py3-none-any.whl", hash = "sha256:41cdde0a77290e480cf53892f5c5e50921a7ee3e5cd60ba91bf19837b33badcf"}, + {file = "pymdown_extensions-10.11.2.tar.gz", hash = "sha256:bc8847ecc9e784a098efd35e20cba772bc5a1b529dfcef9dc1972db9021a1049"}, +] + +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.12)"] + +[[package]] +name = "pyparsing" +version = "3.1.4" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, + {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pypdf" +version = "4.3.1" +description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pypdf-4.3.1-py3-none-any.whl", hash = "sha256:64b31da97eda0771ef22edb1bfecd5deee4b72c3d1736b7df2689805076d6418"}, + {file = "pypdf-4.3.1.tar.gz", hash = "sha256:b2f37fe9a3030aa97ca86067a56ba3f9d3565f9a791b305c7355d8392c30d91b"}, +] + +[package.dependencies] +typing_extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +crypto = ["PyCryptodome", "cryptography"] +dev = ["black", "flit", "pip-tools", "pre-commit (<2.18.0)", "pytest-cov", "pytest-socket", "pytest-timeout", "pytest-xdist", "wheel"] +docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] +full = ["Pillow (>=8.0.0)", "PyCryptodome", "cryptography"] +image = ["Pillow (>=8.0.0)"] + +[[package]] +name = "pytest" +version = "8.3.3" +description = "pytest: simple powerful testing with Python" +optional = 
false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them 
as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-json-logger" +version = "2.0.7" +description = "A python library adding a json log formatter" +optional = false +python-versions = ">=3.6" +files = [ + {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, + {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, +] + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pywin32" +version = "307" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-307-cp310-cp310-win32.whl", hash = "sha256:f8f25d893c1e1ce2d685ef6d0a481e87c6f510d0f3f117932781f412e0eba31b"}, + {file = "pywin32-307-cp310-cp310-win_amd64.whl", hash = "sha256:36e650c5e5e6b29b5d317385b02d20803ddbac5d1031e1f88d20d76676dd103d"}, + {file = "pywin32-307-cp310-cp310-win_arm64.whl", hash = "sha256:0c12d61e0274e0c62acee79e3e503c312426ddd0e8d4899c626cddc1cafe0ff4"}, + {file = "pywin32-307-cp311-cp311-win32.whl", hash = "sha256:fec5d27cc893178fab299de911b8e4d12c5954e1baf83e8a664311e56a272b75"}, + {file = "pywin32-307-cp311-cp311-win_amd64.whl", hash = 
"sha256:987a86971753ed7fdd52a7fb5747aba955b2c7fbbc3d8b76ec850358c1cc28c3"}, + {file = "pywin32-307-cp311-cp311-win_arm64.whl", hash = "sha256:fd436897c186a2e693cd0437386ed79f989f4d13d6f353f8787ecbb0ae719398"}, + {file = "pywin32-307-cp312-cp312-win32.whl", hash = "sha256:07649ec6b01712f36debf39fc94f3d696a46579e852f60157a729ac039df0815"}, + {file = "pywin32-307-cp312-cp312-win_amd64.whl", hash = "sha256:00d047992bb5dcf79f8b9b7c81f72e0130f9fe4b22df613f755ab1cc021d8347"}, + {file = "pywin32-307-cp312-cp312-win_arm64.whl", hash = "sha256:b53658acbfc6a8241d72cc09e9d1d666be4e6c99376bc59e26cdb6223c4554d2"}, + {file = "pywin32-307-cp313-cp313-win32.whl", hash = "sha256:ea4d56e48dc1ab2aa0a5e3c0741ad6e926529510516db7a3b6981a1ae74405e5"}, + {file = "pywin32-307-cp313-cp313-win_amd64.whl", hash = "sha256:576d09813eaf4c8168d0bfd66fb7cb3b15a61041cf41598c2db4a4583bf832d2"}, + {file = "pywin32-307-cp313-cp313-win_arm64.whl", hash = "sha256:b30c9bdbffda6a260beb2919f918daced23d32c79109412c2085cbc513338a0a"}, + {file = "pywin32-307-cp37-cp37m-win32.whl", hash = "sha256:5101472f5180c647d4525a0ed289ec723a26231550dbfd369ec19d5faf60e511"}, + {file = "pywin32-307-cp37-cp37m-win_amd64.whl", hash = "sha256:05de55a7c110478dc4b202230e98af5e0720855360d2b31a44bb4e296d795fba"}, + {file = "pywin32-307-cp38-cp38-win32.whl", hash = "sha256:13d059fb7f10792542082f5731d5d3d9645320fc38814759313e5ee97c3fac01"}, + {file = "pywin32-307-cp38-cp38-win_amd64.whl", hash = "sha256:7e0b2f93769d450a98ac7a31a087e07b126b6d571e8b4386a5762eb85325270b"}, + {file = "pywin32-307-cp39-cp39-win32.whl", hash = "sha256:55ee87f2f8c294e72ad9d4261ca423022310a6e79fb314a8ca76ab3f493854c6"}, + {file = "pywin32-307-cp39-cp39-win_amd64.whl", hash = "sha256:e9d5202922e74985b037c9ef46778335c102b74b95cec70f629453dbe7235d87"}, +] + +[[package]] +name = "pywinpty" +version = "2.0.13" +description = "Pseudo terminal support for Windows from Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pywinpty-2.0.13-cp310-none-win_amd64.whl", hash = "sha256:697bff211fb5a6508fee2dc6ff174ce03f34a9a233df9d8b5fe9c8ce4d5eaf56"}, + {file = "pywinpty-2.0.13-cp311-none-win_amd64.whl", hash = "sha256:b96fb14698db1284db84ca38c79f15b4cfdc3172065b5137383910567591fa99"}, + {file = "pywinpty-2.0.13-cp312-none-win_amd64.whl", hash = "sha256:2fd876b82ca750bb1333236ce98488c1be96b08f4f7647cfdf4129dfad83c2d4"}, + {file = "pywinpty-2.0.13-cp38-none-win_amd64.whl", hash = "sha256:61d420c2116c0212808d31625611b51caf621fe67f8a6377e2e8b617ea1c1f7d"}, + {file = "pywinpty-2.0.13-cp39-none-win_amd64.whl", hash = "sha256:71cb613a9ee24174730ac7ae439fd179ca34ccb8c5349e8d7b72ab5dea2c6f4b"}, + {file = "pywinpty-2.0.13.tar.gz", hash = "sha256:c34e32351a3313ddd0d7da23d27f835c860d32fe4ac814d372a3ea9594f41dde"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = 
"PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash 
= "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "pyzmq" +version = "26.2.0" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"}, + {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", 
hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"}, + {file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"}, + {file = "pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"}, + {file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"}, + {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"}, + {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"}, + {file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"}, + {file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"}, + {file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"}, + {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"}, + {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"}, 
+ {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"}, + {file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"}, + {file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"}, + {file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"}, + {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"}, + {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"}, + {file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = 
"sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"}, + {file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"}, + {file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"}, + {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"}, + {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"}, + {file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"}, + {file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"}, + {file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"}, + {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"}, + {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"}, + {file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"}, + {file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"}, + {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"}, + {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"}, + {file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"}, + {file = 
"pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"}, + {file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"}, + {file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "referencing" +version = "0.35.1" +description = "JSON Referencing + 
Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "regex" +version = "2024.9.11" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.8" +files = [ + {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, + {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, + {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, + {file = 
"regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, + {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, + {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, + {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, + {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, + {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, + {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, + {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, + {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, + {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash 
= "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, + {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, + {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, + {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, + {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, + {file = 
"regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, + {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, + {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, + {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, + {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, + {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, + {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, + {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, + {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, + {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, + {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, + {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, + {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, + {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, + {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, + {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, + {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +description = "A utility belt for advanced users of python-requests" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, + {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, +] + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +description = "A pure python RFC3339 validator" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, + {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +description = "Pure python rfc3986 validator" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, + {file = 
"rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, +] + +[[package]] +name = "rich" +version = "13.9.2" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, + {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rpds-py" +version = "0.20.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, + {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"}, + {file = 
"rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"}, + {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"}, + {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"}, + {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"}, + {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"}, + {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"}, + {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"}, + {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"}, + {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"}, + {file = 
"rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"}, + {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"}, + {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"}, + {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"}, + {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"}, + {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"}, + {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"}, + {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"}, + {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"}, + {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"}, + {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"}, + {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"}, + {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"}, + {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"}, + {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"}, + {file = 
"rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"}, + {file = 
"rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"}, + {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, +] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "scikit-learn" +version = "1.5.2" +description = "A set of python modules for machine learning and data mining" +optional = false 
+python-versions = ">=3.9" +files = [ + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, + {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, + {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, + {file = 
"scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, + {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, + {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.19.5" +scipy = ">=1.6.0" +threadpoolctl = ">=3.1.0" + +[package.extras] +benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", 
"sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] +maintenance = ["conda-lock (==2.5.6)"] +tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] + +[[package]] +name = "scipy" +version = "1.14.1" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"}, + {file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"}, + {file = "scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"}, + {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"}, + {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"}, + {file = 
"scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"}, + {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"}, + {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"}, + {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"}, + {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"}, + {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.3" + +[package.extras] +dev = 
["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<=7.3.7)", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "send2trash" +version = "1.8.3" +description = "Send file to trash natively under Mac OS X, Windows and Linux" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, + {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, +] + +[package.extras] +nativelib = ["pyobjc-framework-Cocoa", "pywin32"] +objc = ["pyobjc-framework-Cocoa"] +win32 = ["pywin32"] + +[[package]] +name = "setuptools" +version = "75.1.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, + {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", 
"platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] + +[[package]] +name = "shapely" +version = "2.0.6" +description = "Manipulation and analysis of geometric objects" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29a34e068da2d321e926b5073539fd2a1d4429a2c656bd63f0bd4c8f5b236d0b"}, + {file = "shapely-2.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c84c3f53144febf6af909d6b581bc05e8785d57e27f35ebaa5c1ab9baba13b"}, + {file = "shapely-2.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad2fae12dca8d2b727fa12b007e46fbc522148a584f5d6546c539f3464dccde"}, + {file = "shapely-2.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3304883bd82d44be1b27a9d17f1167fda8c7f5a02a897958d86c59ec69b705e"}, + {file = "shapely-2.0.6-cp310-cp310-win32.whl", hash = "sha256:3ec3a0eab496b5e04633a39fa3d5eb5454628228201fb24903d38174ee34565e"}, + {file = "shapely-2.0.6-cp310-cp310-win_amd64.whl", hash = 
"sha256:28f87cdf5308a514763a5c38de295544cb27429cfa655d50ed8431a4796090c4"}, + {file = "shapely-2.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5aeb0f51a9db176da9a30cb2f4329b6fbd1e26d359012bb0ac3d3c7781667a9e"}, + {file = "shapely-2.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9a7a78b0d51257a367ee115f4d41ca4d46edbd0dd280f697a8092dd3989867b2"}, + {file = "shapely-2.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f32c23d2f43d54029f986479f7c1f6e09c6b3a19353a3833c2ffb226fb63a855"}, + {file = "shapely-2.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3dc9fb0eb56498912025f5eb352b5126f04801ed0e8bdbd867d21bdbfd7cbd0"}, + {file = "shapely-2.0.6-cp311-cp311-win32.whl", hash = "sha256:d93b7e0e71c9f095e09454bf18dad5ea716fb6ced5df3cb044564a00723f339d"}, + {file = "shapely-2.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:c02eb6bf4cfb9fe6568502e85bb2647921ee49171bcd2d4116c7b3109724ef9b"}, + {file = "shapely-2.0.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cec9193519940e9d1b86a3b4f5af9eb6910197d24af02f247afbfb47bcb3fab0"}, + {file = "shapely-2.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83b94a44ab04a90e88be69e7ddcc6f332da7c0a0ebb1156e1c4f568bbec983c3"}, + {file = "shapely-2.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:537c4b2716d22c92036d00b34aac9d3775e3691f80c7aa517c2c290351f42cd8"}, + {file = "shapely-2.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fea108334be345c283ce74bf064fa00cfdd718048a8af7343c59eb40f59726"}, + {file = "shapely-2.0.6-cp312-cp312-win32.whl", hash = "sha256:42fd4cd4834747e4990227e4cbafb02242c0cffe9ce7ef9971f53ac52d80d55f"}, + {file = "shapely-2.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:665990c84aece05efb68a21b3523a6b2057e84a1afbef426ad287f0796ef8a48"}, + {file = "shapely-2.0.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:42805ef90783ce689a4dde2b6b2f261e2c52609226a0438d882e3ced40bb3013"}, + {file = "shapely-2.0.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6d2cb146191a47bd0cee8ff5f90b47547b82b6345c0d02dd8b25b88b68af62d7"}, + {file = "shapely-2.0.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3fdef0a1794a8fe70dc1f514440aa34426cc0ae98d9a1027fb299d45741c381"}, + {file = "shapely-2.0.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c665a0301c645615a107ff7f52adafa2153beab51daf34587170d85e8ba6805"}, + {file = "shapely-2.0.6-cp313-cp313-win32.whl", hash = "sha256:0334bd51828f68cd54b87d80b3e7cee93f249d82ae55a0faf3ea21c9be7b323a"}, + {file = "shapely-2.0.6-cp313-cp313-win_amd64.whl", hash = "sha256:d37d070da9e0e0f0a530a621e17c0b8c3c9d04105655132a87cfff8bd77cc4c2"}, + {file = "shapely-2.0.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fa7468e4f5b92049c0f36d63c3e309f85f2775752e076378e36c6387245c5462"}, + {file = "shapely-2.0.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed5867e598a9e8ac3291da6cc9baa62ca25706eea186117034e8ec0ea4355653"}, + {file = "shapely-2.0.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81d9dfe155f371f78c8d895a7b7f323bb241fb148d848a2bf2244f79213123fe"}, + {file = "shapely-2.0.6-cp37-cp37m-win32.whl", hash = "sha256:fbb7bf02a7542dba55129062570211cfb0defa05386409b3e306c39612e7fbcc"}, + {file = "shapely-2.0.6-cp37-cp37m-win_amd64.whl", hash = "sha256:837d395fac58aa01aa544495b97940995211e3e25f9aaf87bc3ba5b3a8cd1ac7"}, + {file = "shapely-2.0.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c6d88ade96bf02f6bfd667ddd3626913098e243e419a0325ebef2bbd481d1eb6"}, + {file = "shapely-2.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b3b818c4407eaa0b4cb376fd2305e20ff6df757bf1356651589eadc14aab41b"}, + {file = "shapely-2.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1bbc783529a21f2bd50c79cef90761f72d41c45622b3e57acf78d984c50a5d13"}, + {file = "shapely-2.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2423f6c0903ebe5df6d32e0066b3d94029aab18425ad4b07bf98c3972a6e25a1"}, + {file = "shapely-2.0.6-cp38-cp38-win32.whl", hash = "sha256:2de00c3bfa80d6750832bde1d9487e302a6dd21d90cb2f210515cefdb616e5f5"}, + {file = "shapely-2.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:3a82d58a1134d5e975f19268710e53bddd9c473743356c90d97ce04b73e101ee"}, + {file = "shapely-2.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:392f66f458a0a2c706254f473290418236e52aa4c9b476a072539d63a2460595"}, + {file = "shapely-2.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eba5bae271d523c938274c61658ebc34de6c4b33fdf43ef7e938b5776388c1be"}, + {file = "shapely-2.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7060566bc4888b0c8ed14b5d57df8a0ead5c28f9b69fb6bed4476df31c51b0af"}, + {file = "shapely-2.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b02154b3e9d076a29a8513dffcb80f047a5ea63c897c0cd3d3679f29363cf7e5"}, + {file = "shapely-2.0.6-cp39-cp39-win32.whl", hash = "sha256:44246d30124a4f1a638a7d5419149959532b99dfa25b54393512e6acc9c211ac"}, + {file = "shapely-2.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:2b542d7f1dbb89192d3512c52b679c822ba916f93479fa5d4fc2fe4fa0b3c9e8"}, + {file = "shapely-2.0.6.tar.gz", hash = "sha256:997f6159b1484059ec239cacaa53467fd8b5564dabe186cd84ac2944663b0bf6"}, +] + +[package.dependencies] +numpy = ">=1.14,<3" + +[package.extras] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = 
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "soupsieve" +version = "2.6" +description = "A modern CSS selector implementation for Beautiful Soup." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.35" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = 
"SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = 
"sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, +] + +[package.dependencies] +greenlet = 
{version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "st-annotated-text" +version = "4.0.1" +description = "A simple component to display annotated text in Streamlit apps." +optional = false +python-versions = ">=3.5" +files = [ + {file = "st-annotated-text-4.0.1.tar.gz", hash = "sha256:a8ccb9a35c078ef22c6ebb244a9a0605ce27f1fd699f55939497669081b79630"}, + {file = "st_annotated_text-4.0.1-py3-none-any.whl", hash = "sha256:0a2a72903a5752a55c0acef71bdf92cd225a23a8ae4135cfc213c4538bed432f"}, +] + +[package.dependencies] +htbuilder = "*" + +[[package]] +name = "st-theme" +version = "1.2.3" +description = "A component that returns the active theme of the Streamlit app." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "st-theme-1.2.3.tar.gz", hash = "sha256:ca97aece1a48ded6e83fd742c27cb0851e1bce2100ab4b6c37c7b6e003b65b42"}, + {file = "st_theme-1.2.3-py3-none-any.whl", hash = "sha256:0a54d9817dd5f8a6d7b0d071b25ae72eacf536c63a5fb97374923938021b1389"}, +] + +[package.dependencies] +streamlit = ">=1.33" + +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "starlette" +version = "0.37.2" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "starlette-0.37.2-py3-none-any.whl", hash = "sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee"}, + {file = "starlette-0.37.2.tar.gz", hash = "sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] + +[[package]] +name = "streamlit" +version = "1.39.0" +description = "A faster way to build and share data apps" +optional = false +python-versions = "!=3.9.7,>=3.8" +files = [ + {file = "streamlit-1.39.0-py2.py3-none-any.whl", hash = "sha256:a359fc54ed568b35b055ff1d453c320735539ad12e264365a36458aef55a5fba"}, + {file = "streamlit-1.39.0.tar.gz", hash = "sha256:fef9de7983c4ee65c08e85607d7ffccb56b00482b1041fa62f90e4815d39df3a"}, +] + +[package.dependencies] +altair = ">=4.0,<6" +blinker = ">=1.0.0,<2" +cachetools = ">=4.0,<6" +click = ">=7.0,<9" +gitpython = ">=3.0.7,<3.1.19 || >3.1.19,<4" +numpy = ">=1.20,<3" +packaging = ">=20,<25" +pandas = ">=1.4.0,<3" +pillow = ">=7.1.0,<11" +protobuf = ">=3.20,<6" +pyarrow = ">=7.0" +pydeck = ">=0.8.0b4,<1" +requests = ">=2.27,<3" +rich = ">=10.14.0,<14" +tenacity = ">=8.1.0,<10" +toml = ">=0.10.1,<2" +tornado = ">=6.0.3,<7" +typing-extensions = ">=4.3.0,<5" +watchdog = {version = ">=2.1.5,<6", markers = "platform_system != \"Darwin\""} + +[package.extras] +snowflake = ["snowflake-connector-python (>=2.8.0)", "snowflake-snowpark-python[modin] (>=1.17.0)"] + +[[package]] +name = "streamlit-camera-input-live" +version = "0.2.0" +description = "Alternative version of st.camera_input which returns the webcam images live, without any button press needed" +optional = false +python-versions = ">=3.7" +files = [ + {file = "streamlit-camera-input-live-0.2.0.tar.gz", hash = "sha256:20ceb952b98410084176fcfeb9148e02ea29033a88d4a923161ac7890cedae0f"}, + {file = 
"streamlit_camera_input_live-0.2.0-py3-none-any.whl", hash = "sha256:dacb56cdedbb0d6c07e35a66b755b9145b5023e5c855c64193c3d3e73198e9be"}, +] + +[package.dependencies] +jinja2 = "*" +streamlit = ">=1.2" + +[[package]] +name = "streamlit-card" +version = "1.0.2" +description = "A streamlit component, to make UI cards" +optional = false +python-versions = ">=3.8" +files = [ + {file = "streamlit_card-1.0.2-py3-none-any.whl", hash = "sha256:f5d01ce57d6481eb3ba44e504146f56a7b82907d6700f0c19266ed6381a9c58f"}, + {file = "streamlit_card-1.0.2.tar.gz", hash = "sha256:8001cd5edd8a6e2db36ee81f37dc645f08f78c21a2ba968403176c68b4f33cb1"}, +] + +[package.dependencies] +streamlit = ">=0.63" + +[[package]] +name = "streamlit-embedcode" +version = "0.1.2" +description = "Streamlit component for embedded code snippets" +optional = false +python-versions = ">=3.6" +files = [ + {file = "streamlit-embedcode-0.1.2.tar.gz", hash = "sha256:22a50eb43407bab3d0ed2d4b58e89819da477cd0592ef87edbd373c286712e3a"}, + {file = "streamlit_embedcode-0.1.2-py3-none-any.whl", hash = "sha256:b3c9520c1b48f2eef3c702b5a967f64c9a8ff2ea8e74ebb26c0e9195965bb923"}, +] + +[package.dependencies] +streamlit = ">=0.63" + +[[package]] +name = "streamlit-extras" +version = "0.4.7" +description = "A library to discover, try, install and share Streamlit extras" +optional = false +python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" +files = [ + {file = "streamlit_extras-0.4.7-py3-none-any.whl", hash = "sha256:ee8e04d9dcdaf89d9865f3a6cbba8f7dbbce0cd8c9e5c0610d84a0bd04fc4212"}, + {file = "streamlit_extras-0.4.7.tar.gz", hash = "sha256:c9e4ef1b6dded159ca79f0d87890b5df6fa53cb4caa781cc4bf1250830051a90"}, +] + +[package.dependencies] +entrypoints = ">=0.4" +htbuilder = ">=0.6.2" +markdownlit = ">=0.0.5" +plotly = ">=1.0.0" +prometheus-client = ">=0.14.0" +protobuf = "!=3.20.2" +st-annotated-text = ">=3.0.0" +st-theme = ">=1.0.1" +streamlit = ">=1.0.0" +streamlit-camera-input-live = 
">=0.2.0" +streamlit-card = ">=0.0.4" +streamlit-embedcode = ">=0.1.2" +streamlit-faker = ">=0.0.2" +streamlit-image-coordinates = ">=0.1.1,<0.2.0" +streamlit-keyup = ">=0.1.9" +streamlit-toggle-switch = ">=1.0.2" +streamlit-vertical-slider = ">=2.5.5" +validators = ">=0.20.0" + +[[package]] +name = "streamlit-faker" +version = "0.0.3" +description = "streamlit-faker is a library to very easily fake Streamlit commands" +optional = false +python-versions = ">=3.6" +files = [ + {file = "streamlit_faker-0.0.3-py3-none-any.whl", hash = "sha256:caf410867b55b4877d8fe73cc987d089e1938f8e63594f1eb579e28015844215"}, + {file = "streamlit_faker-0.0.3.tar.gz", hash = "sha256:bff0f053aa514a99313a3699746183b41111891c82d6e9b41b1c69a7d719bf2f"}, +] + +[package.dependencies] +faker = "*" +matplotlib = "*" +streamlit = "*" +streamlit-extras = "*" + +[[package]] +name = "streamlit-feedback" +version = "0.1.3" +description = "Streamlit component that allows you to collect user feedback in your apps" +optional = false +python-versions = ">=3.7" +files = [ + {file = "streamlit-feedback-0.1.3.tar.gz", hash = "sha256:90f198a4e116e901aaf08058261d444f0abcd3a365bac8c5d6fc08a3c05e02a2"}, + {file = "streamlit_feedback-0.1.3-py3-none-any.whl", hash = "sha256:533082aa7b0978692527a46ef63d520db50be79d43a436baa6c2d11a1d93fe9d"}, +] + +[package.dependencies] +streamlit = ">=0.63" + +[[package]] +name = "streamlit-image-coordinates" +version = "0.1.9" +description = "Streamlit component that displays an image and returns the coordinates when you click on it" +optional = false +python-versions = ">=3.7" +files = [ + {file = "streamlit_image_coordinates-0.1.9-py3-none-any.whl", hash = "sha256:e577d475707ce8a3f7be1825027af6b4d7b609a456f4b25b794756ed2436ab06"}, + {file = "streamlit_image_coordinates-0.1.9.tar.gz", hash = "sha256:825e1f49053f1363913014a4e9130a03b9ca01fb5f7bd269b17afe8162d2ba37"}, +] + +[package.dependencies] +jinja2 = "*" +streamlit = ">=1.2" + +[[package]] +name = "streamlit-keyup" 
+version = "0.2.4" +description = "Text input that renders on keyup" +optional = false +python-versions = ">=3.7" +files = [ + {file = "streamlit-keyup-0.2.4.tar.gz", hash = "sha256:ca5a050bcca339f1099ae89d053ed19310ae3d74d764fcd71493a53372819d96"}, + {file = "streamlit_keyup-0.2.4-py3-none-any.whl", hash = "sha256:bdef2ce6307bfed7db1be93a7194dec5d25e06f500edddc9e9094a33f8770578"}, +] + +[package.dependencies] +jinja2 = "*" +streamlit = ">=1.2" + +[[package]] +name = "streamlit-toggle-switch" +version = "1.0.2" +description = "Creates a customizable toggle" +optional = false +python-versions = ">=3.6" +files = [ + {file = "streamlit_toggle_switch-1.0.2-py3-none-any.whl", hash = "sha256:0081212d80d178bda337acf2432425e2016d757f57834b18645d4c5b928d4c0f"}, + {file = "streamlit_toggle_switch-1.0.2.tar.gz", hash = "sha256:991b103cd3448b0f6507f8051777b996a17b4630956d5b6fa13344175b20e572"}, +] + +[package.dependencies] +streamlit = ">=0.63" + +[[package]] +name = "streamlit-vertical-slider" +version = "2.5.5" +description = "Creates a customizable vertical slider" +optional = false +python-versions = ">=3.8" +files = [ + {file = "streamlit_vertical_slider-2.5.5-py3-none-any.whl", hash = "sha256:8182e861444fcd69e05c05e7109a636d459560c249f1addf78b58e525a719cb6"}, + {file = "streamlit_vertical_slider-2.5.5.tar.gz", hash = "sha256:d6854cf81a606f5c021df2037d2c49036df2d03ce5082a5227a2acca8322ca74"}, +] + +[package.dependencies] +streamlit = ">=1.22.0" + +[[package]] +name = "tenacity" +version = "8.5.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "terminado" 
+version = "0.18.1" +description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"}, + {file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"}, +] + +[package.dependencies] +ptyprocess = {version = "*", markers = "os_name != \"nt\""} +pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} +tornado = ">=6.1.0" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] +typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, + {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, +] + +[[package]] +name = "tiktoken" +version = "0.8.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, + {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, + {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, + {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, + {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"}, + {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"}, + {file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"}, + {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"}, + {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"}, + {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"}, + {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"}, + {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tinycss2" +version = "1.3.0" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, + {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + +[[package]] +name = "tokenizers" +version = "0.20.1" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:439261da7c0a5c88bda97acb284d49fbdaf67e9d3b623c0bfd107512d22787a9"}, + {file = "tokenizers-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03dae629d99068b1ea5416d50de0fea13008f04129cc79af77a2a6392792d93c"}, + {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b61f561f329ffe4b28367798b89d60c4abf3f815d37413b6352bc6412a359867"}, + {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec870fce1ee5248a10be69f7a8408a234d6f2109f8ea827b4f7ecdbf08c9fd15"}, + {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d388d1ea8b7447da784e32e3b86a75cce55887e3b22b31c19d0b186b1c677800"}, + {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:299c85c1d21135bc01542237979bf25c32efa0d66595dd0069ae259b97fb2dbe"}, + {file = 
"tokenizers-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e96f6c14c9752bb82145636b614d5a78e9cde95edfbe0a85dad0dd5ddd6ec95c"}, + {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc9e95ad49c932b80abfbfeaf63b155761e695ad9f8a58c52a47d962d76e310f"}, + {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f22dee205329a636148c325921c73cf3e412e87d31f4d9c3153b302a0200057b"}, + {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2ffd9a8895575ac636d44500c66dffaef133823b6b25067604fa73bbc5ec09d"}, + {file = "tokenizers-0.20.1-cp310-none-win32.whl", hash = "sha256:2847843c53f445e0f19ea842a4e48b89dd0db4e62ba6e1e47a2749d6ec11f50d"}, + {file = "tokenizers-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:f9aa93eacd865f2798b9e62f7ce4533cfff4f5fbd50c02926a78e81c74e432cd"}, + {file = "tokenizers-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4a717dcb08f2dabbf27ae4b6b20cbbb2ad7ed78ce05a829fae100ff4b3c7ff15"}, + {file = "tokenizers-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f84dad1ff1863c648d80628b1b55353d16303431283e4efbb6ab1af56a75832"}, + {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:929c8f3afa16a5130a81ab5079c589226273ec618949cce79b46d96e59a84f61"}, + {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d10766473954397e2d370f215ebed1cc46dcf6fd3906a2a116aa1d6219bfedc3"}, + {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9300fac73ddc7e4b0330acbdda4efaabf74929a4a61e119a32a181f534a11b47"}, + {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ecaf7b0e39caeb1aa6dd6e0975c405716c82c1312b55ac4f716ef563a906969"}, + {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:5170be9ec942f3d1d317817ced8d749b3e1202670865e4fd465e35d8c259de83"}, + {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f1ae08fa9aea5891cbd69df29913e11d3841798e0bfb1ff78b78e4e7ea0a4"}, + {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ee86d4095d3542d73579e953c2e5e07d9321af2ffea6ecc097d16d538a2dea16"}, + {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:86dcd08da163912e17b27bbaba5efdc71b4fbffb841530fdb74c5707f3c49216"}, + {file = "tokenizers-0.20.1-cp311-none-win32.whl", hash = "sha256:9af2dc4ee97d037bc6b05fa4429ddc87532c706316c5e11ce2f0596dfcfa77af"}, + {file = "tokenizers-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:899152a78b095559c287b4c6d0099469573bb2055347bb8154db106651296f39"}, + {file = "tokenizers-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:407ab666b38e02228fa785e81f7cf79ef929f104bcccf68a64525a54a93ceac9"}, + {file = "tokenizers-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f13a2d16032ebc8bd812eb8099b035ac65887d8f0c207261472803b9633cf3e"}, + {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e98eee4dca22849fbb56a80acaa899eec5b72055d79637dd6aa15d5e4b8628c9"}, + {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47c1bcdd61e61136087459cb9e0b069ff23b5568b008265e5cbc927eae3387ce"}, + {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128c1110e950534426e2274837fc06b118ab5f2fa61c3436e60e0aada0ccfd67"}, + {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2e2d47a819d2954f2c1cd0ad51bb58ffac6f53a872d5d82d65d79bf76b9896d"}, + {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bdd67a0e3503a9a7cf8bc5a4a49cdde5fa5bada09a51e4c7e1c73900297539bd"}, + {file = 
"tokenizers-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689b93d2e26d04da337ac407acec8b5d081d8d135e3e5066a88edd5bdb5aff89"}, + {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0c6a796ddcd9a19ad13cf146997cd5895a421fe6aec8fd970d69f9117bddb45c"}, + {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3ea919687aa7001a8ff1ba36ac64f165c4e89035f57998fa6cedcfd877be619d"}, + {file = "tokenizers-0.20.1-cp312-none-win32.whl", hash = "sha256:6d3ac5c1f48358ffe20086bf065e843c0d0a9fce0d7f0f45d5f2f9fba3609ca5"}, + {file = "tokenizers-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:b0874481aea54a178f2bccc45aa2d0c99cd3f79143a0948af6a9a21dcc49173b"}, + {file = "tokenizers-0.20.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:96af92e833bd44760fb17f23f402e07a66339c1dcbe17d79a9b55bb0cc4f038e"}, + {file = "tokenizers-0.20.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:65f34e5b731a262dfa562820818533c38ce32a45864437f3d9c82f26c139ca7f"}, + {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17f98fccb5c12ab1ce1f471731a9cd86df5d4bd2cf2880c5a66b229802d96145"}, + {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8c0fc3542cf9370bf92c932eb71bdeb33d2d4aeeb4126d9fd567b60bd04cb30"}, + {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b39356df4575d37f9b187bb623aab5abb7b62c8cb702867a1768002f814800c"}, + {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfdad27b0e50544f6b838895a373db6114b85112ba5c0cefadffa78d6daae563"}, + {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:094663dd0e85ee2e573126918747bdb40044a848fde388efb5b09d57bc74c680"}, + {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:14e4cf033a2aa207d7ac790e91adca598b679999710a632c4a494aab0fc3a1b2"}, + {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9310951c92c9fb91660de0c19a923c432f110dbfad1a2d429fbc44fa956bf64f"}, + {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05e41e302c315bd2ed86c02e917bf03a6cf7d2f652c9cee1a0eb0d0f1ca0d32c"}, + {file = "tokenizers-0.20.1-cp37-none-win32.whl", hash = "sha256:212231ab7dfcdc879baf4892ca87c726259fa7c887e1688e3f3cead384d8c305"}, + {file = "tokenizers-0.20.1-cp37-none-win_amd64.whl", hash = "sha256:896195eb9dfdc85c8c052e29947169c1fcbe75a254c4b5792cdbd451587bce85"}, + {file = "tokenizers-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:741fb22788482d09d68e73ece1495cfc6d9b29a06c37b3df90564a9cfa688e6d"}, + {file = "tokenizers-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10be14ebd8082086a342d969e17fc2d6edc856c59dbdbddd25f158fa40eaf043"}, + {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:514cf279b22fa1ae0bc08e143458c74ad3b56cd078b319464959685a35c53d5e"}, + {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a647c5b7cb896d6430cf3e01b4e9a2d77f719c84cefcef825d404830c2071da2"}, + {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cdf379219e1e1dd432091058dab325a2e6235ebb23e0aec8d0508567c90cd01"}, + {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ba72260449e16c4c2f6f3252823b059fbf2d31b32617e582003f2b18b415c39"}, + {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:910b96ed87316e4277b23c7bcaf667ce849c7cc379a453fa179e7e09290eeb25"}, + {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53975a6694428a0586534cc1354b2408d4e010a3103117f617cbb550299797c"}, + {file = 
"tokenizers-0.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:07c4b7be58da142b0730cc4e5fd66bb7bf6f57f4986ddda73833cd39efef8a01"}, + {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b605c540753e62199bf15cf69c333e934077ef2350262af2ccada46026f83d1c"}, + {file = "tokenizers-0.20.1-cp38-none-win32.whl", hash = "sha256:88b3bc76ab4db1ab95ead623d49c95205411e26302cf9f74203e762ac7e85685"}, + {file = "tokenizers-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:d412a74cf5b3f68a90c615611a5aa4478bb303d1c65961d22db45001df68afcb"}, + {file = "tokenizers-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a25dcb2f41a0a6aac31999e6c96a75e9152fa0127af8ece46c2f784f23b8197a"}, + {file = "tokenizers-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a12c3cebb8c92e9c35a23ab10d3852aee522f385c28d0b4fe48c0b7527d59762"}, + {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02e18da58cf115b7c40de973609c35bde95856012ba42a41ee919c77935af251"}, + {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f326a1ac51ae909b9760e34671c26cd0dfe15662f447302a9d5bb2d872bab8ab"}, + {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b4872647ea6f25224e2833b044b0b19084e39400e8ead3cfe751238b0802140"}, + {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce6238a3311bb8e4c15b12600927d35c267b92a52c881ef5717a900ca14793f7"}, + {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57b7a8880b208866508b06ce365dc631e7a2472a3faa24daa430d046fb56c885"}, + {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a908c69c2897a68f412aa05ba38bfa87a02980df70f5a72fa8490479308b1f2d"}, + {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:da1001aa46f4490099c82e2facc4fbc06a6a32bf7de3918ba798010954b775e0"}, + {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:42c097390e2f0ed0a5c5d569e6669dd4e9fff7b31c6a5ce6e9c66a61687197de"}, + {file = "tokenizers-0.20.1-cp39-none-win32.whl", hash = "sha256:3d4d218573a3d8b121a1f8c801029d70444ffb6d8f129d4cca1c7b672ee4a24c"}, + {file = "tokenizers-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:37d1e6f616c84fceefa7c6484a01df05caf1e207669121c66213cb5b2911d653"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48689da7a395df41114f516208d6550e3e905e1239cc5ad386686d9358e9cef0"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:712f90ea33f9bd2586b4a90d697c26d56d0a22fd3c91104c5858c4b5b6489a79"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:359eceb6a620c965988fc559cebc0a98db26713758ec4df43fb76d41486a8ed5"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d3caf244ce89d24c87545aafc3448be15870096e796c703a0d68547187192e1"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b03cf8b9a32254b1bf8a305fb95c6daf1baae0c1f93b27f2b08c9759f41dee"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:218e5a3561561ea0f0ef1559c6d95b825308dbec23fb55b70b92589e7ff2e1e8"}, + {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f40df5e0294a95131cc5f0e0eb91fe86d88837abfbee46b9b3610b09860195a7"}, + {file = "tokenizers-0.20.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:08aaa0d72bb65058e8c4b0455f61b840b156c557e2aca57627056624c3a93976"}, + {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:998700177b45f70afeb206ad22c08d9e5f3a80639dae1032bf41e8cbc4dada4b"}, 
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62f7fbd3c2c38b179556d879edae442b45f68312019c3a6013e56c3947a4e648"}, + {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31e87fca4f6bbf5cc67481b562147fe932f73d5602734de7dd18a8f2eee9c6dd"}, + {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:956f21d359ae29dd51ca5726d2c9a44ffafa041c623f5aa33749da87cfa809b9"}, + {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1fbbaf17a393c78d8aedb6a334097c91cb4119a9ced4764ab8cfdc8d254dc9f9"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ebe63e31f9c1a970c53866d814e35ec2ec26fda03097c486f82f3891cee60830"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:81970b80b8ac126910295f8aab2d7ef962009ea39e0d86d304769493f69aaa1e"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130e35e76f9337ed6c31be386e75d4925ea807055acf18ca1a9b0eec03d8fe23"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd28a8614f5c82a54ab2463554e84ad79526c5184cf4573bbac2efbbbcead457"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9041ee665d0fa7f5c4ccf0f81f5e6b7087f797f85b143c094126fc2611fec9d0"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:62eb9daea2a2c06bcd8113a5824af8ef8ee7405d3a71123ba4d52c79bb3d9f1a"}, + {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f861889707b54a9ab1204030b65fd6c22bdd4a95205deec7994dc22a8baa2ea4"}, + {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:89d5c337d74ea6e5e7dc8af124cf177be843bbb9ca6e58c01f75ea103c12c8a9"}, + {file = 
"tokenizers-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0b7f515c83397e73292accdbbbedc62264e070bae9682f06061e2ddce67cacaf"}, + {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0305fc1ec6b1e5052d30d9c1d5c807081a7bd0cae46a33d03117082e91908c"}, + {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc611e6ac0fa00a41de19c3bf6391a05ea201d2d22b757d63f5491ec0e67faa"}, + {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5ffe0d7f7bfcfa3b2585776ecf11da2e01c317027c8573c78ebcb8985279e23"}, + {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e7edb8ec12c100d5458d15b1e47c0eb30ad606a05641f19af7563bc3d1608c14"}, + {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:de291633fb9303555793cc544d4a86e858da529b7d0b752bcaf721ae1d74b2c9"}, + {file = "tokenizers-0.20.1.tar.gz", hash = "sha256:84edcc7cdeeee45ceedb65d518fffb77aec69311c9c8e30f77ad84da3025f002"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[[package]] +name = "tomli" +version = "2.0.2" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomli-2.0.2-py3-none-any.whl", hash 
= "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, +] + +[[package]] +name = "tornado" +version = "6.4.1" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." +optional = false +python-versions = ">=3.8" +files = [ + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, + {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, + {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, + {file = 
"tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, +] + +[[package]] +name = "tqdm" +version = "4.66.5" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traceloop-sdk" +version = "0.30.1" +description = "Traceloop Software Development Kit (SDK) for Python" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "traceloop_sdk-0.30.1-py3-none-any.whl", hash = "sha256:14debe169aaa3ad782f4ccb4aff68cf86bdcb527000fb67a4439120d9d393a09"}, + {file = "traceloop_sdk-0.30.1.tar.gz", hash = "sha256:0e9e4c83bdafe84905470b9b1a5880addb2d6511fd7d15b265a86b94fe218d57"}, +] + +[package.dependencies] +colorama = ">=0.4.6,<0.5.0" +deprecated = ">=1.2.14,<2.0.0" +jinja2 = ">=3.1.2,<4.0.0" +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-exporter-otlp-proto-grpc = ">=1.26.0,<2.0.0" +opentelemetry-exporter-otlp-proto-http = ">=1.26.0,<2.0.0" +opentelemetry-instrumentation-alephalpha = "0.30.1" +opentelemetry-instrumentation-anthropic = "0.30.1" +opentelemetry-instrumentation-bedrock = "0.30.1" +opentelemetry-instrumentation-chromadb = "0.30.1" +opentelemetry-instrumentation-cohere = "0.30.1" +opentelemetry-instrumentation-google-generativeai = "0.30.1" +opentelemetry-instrumentation-groq = "0.30.1" +opentelemetry-instrumentation-haystack = "0.30.1" +opentelemetry-instrumentation-lancedb = "0.30.1" 
+opentelemetry-instrumentation-langchain = "0.30.1" +opentelemetry-instrumentation-llamaindex = "0.30.1" +opentelemetry-instrumentation-marqo = "0.30.1" +opentelemetry-instrumentation-milvus = "0.30.1" +opentelemetry-instrumentation-mistralai = "0.30.1" +opentelemetry-instrumentation-ollama = "0.30.1" +opentelemetry-instrumentation-openai = "0.30.1" +opentelemetry-instrumentation-pinecone = "0.30.1" +opentelemetry-instrumentation-qdrant = "0.30.1" +opentelemetry-instrumentation-replicate = "0.30.1" +opentelemetry-instrumentation-requests = ">=0.48b0,<0.49" +opentelemetry-instrumentation-sqlalchemy = ">=0.48b0,<0.49" +opentelemetry-instrumentation-threading = ">=0.48b0,<0.49" +opentelemetry-instrumentation-together = "0.30.1" +opentelemetry-instrumentation-transformers = "0.30.1" +opentelemetry-instrumentation-urllib3 = ">=0.48b0,<0.49" +opentelemetry-instrumentation-vertexai = "0.30.1" +opentelemetry-instrumentation-watsonx = "0.30.1" +opentelemetry-instrumentation-weaviate = "0.30.1" +opentelemetry-sdk = ">=1.27.0,<2.0.0" +opentelemetry-semantic-conventions-ai = "0.4.1" +posthog = ">3.0.2,<4" +pydantic = ">=1" +tenacity = ">=8.2.3,<9.0.0" + +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20241003" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20240917" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, + {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, +] + +[[package]] +name = "types-requests" +version = "2.32.0.20240914" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, +] + +[package.dependencies] +urllib3 = ">=2" + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "tzdata" +version = "2024.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, +] + +[[package]] +name = "uri-template" +version = "1.3.0" +description = "RFC 6570 URI Template Processor" +optional = false +python-versions = ">=3.7" +files = [ + {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, + {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, +] + +[package.extras] +dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] + +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = 
"sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.30.6" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.30.6-py3-none-any.whl", hash = "sha256:65fd46fe3fda5bdc1b03b94eb634923ff18cd35b2f084813ea79d1f103f711b5"}, + {file = "uvicorn-0.30.6.tar.gz", hash = "sha256:4b15decdda1e72be08209e860a1e10e92439ad5b97cf44cc945fcbee66fc5788"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = 
"extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.20.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9ebafa0b96c62881d5cafa02d9da2e44c23f9f0cd829f3a32a6aff771449c996"}, + {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:35968fc697b0527a06e134999eef859b4034b37aebca537daeb598b9d45a137b"}, + {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b16696f10e59d7580979b420eedf6650010a4a9c3bd8113f24a103dfdb770b10"}, + {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b04d96188d365151d1af41fa2d23257b674e7ead68cfd61c725a422764062ae"}, + {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:94707205efbe809dfa3a0d09c08bef1352f5d3d6612a506f10a319933757c006"}, + {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89e8d33bb88d7263f74dc57d69f0063e06b5a5ce50bb9a6b32f5fcbe655f9e73"}, + {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e50289c101495e0d1bb0bfcb4a60adde56e32f4449a67216a1ab2750aa84f037"}, + {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e237f9c1e8a00e7d9ddaa288e535dc337a39bcbf679f290aee9d26df9e72bce9"}, + {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:746242cd703dc2b37f9d8b9f173749c15e9a918ddb021575a0205ec29a38d31e"}, + {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82edbfd3df39fb3d108fc079ebc461330f7c2e33dbd002d146bf7c445ba6e756"}, + {file = 
"uvloop-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80dc1b139516be2077b3e57ce1cb65bfed09149e1d175e0478e7a987863b68f0"}, + {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f44af67bf39af25db4c1ac27e82e9665717f9c26af2369c404be865c8818dcf"}, + {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4b75f2950ddb6feed85336412b9a0c310a2edbcf4cf931aa5cfe29034829676d"}, + {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:77fbc69c287596880ecec2d4c7a62346bef08b6209749bf6ce8c22bbaca0239e"}, + {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6462c95f48e2d8d4c993a2950cd3d31ab061864d1c226bbf0ee2f1a8f36674b9"}, + {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649c33034979273fa71aa25d0fe120ad1777c551d8c4cd2c0c9851d88fcb13ab"}, + {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a609780e942d43a275a617c0839d85f95c334bad29c4c0918252085113285b5"}, + {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aea15c78e0d9ad6555ed201344ae36db5c63d428818b4b2a42842b3870127c00"}, + {file = "uvloop-0.20.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0e94b221295b5e69de57a1bd4aeb0b3a29f61be6e1b478bb8a69a73377db7ba"}, + {file = "uvloop-0.20.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fee6044b64c965c425b65a4e17719953b96e065c5b7e09b599ff332bb2744bdf"}, + {file = "uvloop-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:265a99a2ff41a0fd56c19c3838b29bf54d1d177964c300dad388b27e84fd7847"}, + {file = "uvloop-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10c2956efcecb981bf9cfb8184d27d5d64b9033f917115a960b83f11bfa0d6b"}, + {file = "uvloop-0.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e7d61fe8e8d9335fac1bf8d5d82820b4808dd7a43020c149b63a1ada953d48a6"}, + {file = 
"uvloop-0.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2beee18efd33fa6fdb0976e18475a4042cd31c7433c866e8a09ab604c7c22ff2"}, + {file = "uvloop-0.20.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8c36fdf3e02cec92aed2d44f63565ad1522a499c654f07935c8f9d04db69e95"}, + {file = "uvloop-0.20.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0fac7be202596c7126146660725157d4813aa29a4cc990fe51346f75ff8fde7"}, + {file = "uvloop-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d0fba61846f294bce41eb44d60d58136090ea2b5b99efd21cbdf4e21927c56a"}, + {file = "uvloop-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95720bae002ac357202e0d866128eb1ac82545bcf0b549b9abe91b5178d9b541"}, + {file = "uvloop-0.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:36c530d8fa03bfa7085af54a48f2ca16ab74df3ec7108a46ba82fd8b411a2315"}, + {file = "uvloop-0.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e97152983442b499d7a71e44f29baa75b3b02e65d9c44ba53b10338e98dedb66"}, + {file = "uvloop-0.20.0.tar.gz", hash = "sha256:4603ca714a754fc8d9b197e325db25b2ea045385e8a3ad05d3463de725fdf469"}, +] + +[package.extras] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "validators" +version = "0.34.0" +description = "Python Data Validation for Humans™" +optional = false +python-versions = ">=3.8" +files = [ + {file = "validators-0.34.0-py3-none-any.whl", hash = "sha256:c804b476e3e6d3786fa07a30073a4ef694e617805eb1946ceee3fe5a9b8b1321"}, + {file = "validators-0.34.0.tar.gz", hash = "sha256:647fe407b45af9a74d245b943b18e6a816acf4926974278f6dd617778e1e781f"}, +] + +[package.extras] +crypto-eth-addresses = ["eth-hash[pycryptodome] 
(>=0.7.0)"] + +[[package]] +name = "watchdog" +version = "5.0.3" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +files = [ + {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85527b882f3facda0579bce9d743ff7f10c3e1e0db0a0d0e28170a7d0e5ce2ea"}, + {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53adf73dcdc0ef04f7735066b4a57a4cd3e49ef135daae41d77395f0b5b692cb"}, + {file = "watchdog-5.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e25adddab85f674acac303cf1f5835951345a56c5f7f582987d266679979c75b"}, + {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f01f4a3565a387080dc49bdd1fefe4ecc77f894991b88ef927edbfa45eb10818"}, + {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:91b522adc25614cdeaf91f7897800b82c13b4b8ac68a42ca959f992f6990c490"}, + {file = "watchdog-5.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d52db5beb5e476e6853da2e2d24dbbbed6797b449c8bf7ea118a4ee0d2c9040e"}, + {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94d11b07c64f63f49876e0ab8042ae034674c8653bfcdaa8c4b32e71cfff87e8"}, + {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:349c9488e1d85d0a58e8cb14222d2c51cbc801ce11ac3936ab4c3af986536926"}, + {file = "watchdog-5.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:53a3f10b62c2d569e260f96e8d966463dec1a50fa4f1b22aec69e3f91025060e"}, + {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:950f531ec6e03696a2414b6308f5c6ff9dab7821a768c9d5788b1314e9a46ca7"}, + {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6deb336cba5d71476caa029ceb6e88047fc1dc74b62b7c4012639c0b563906"}, + {file = "watchdog-5.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1021223c08ba8d2d38d71ec1704496471ffd7be42cfb26b87cd5059323a389a1"}, + {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_universal2.whl", 
hash = "sha256:752fb40efc7cc8d88ebc332b8f4bcbe2b5cc7e881bccfeb8e25054c00c994ee3"}, + {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2e8f3f955d68471fa37b0e3add18500790d129cc7efe89971b8a4cc6fdeb0b2"}, + {file = "watchdog-5.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8ca4d854adcf480bdfd80f46fdd6fb49f91dd020ae11c89b3a79e19454ec627"}, + {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:90a67d7857adb1d985aca232cc9905dd5bc4803ed85cfcdcfcf707e52049eda7"}, + {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:720ef9d3a4f9ca575a780af283c8fd3a0674b307651c1976714745090da5a9e8"}, + {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:223160bb359281bb8e31c8f1068bf71a6b16a8ad3d9524ca6f523ac666bb6a1e"}, + {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:560135542c91eaa74247a2e8430cf83c4342b29e8ad4f520ae14f0c8a19cfb5b"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dd021efa85970bd4824acacbb922066159d0f9e546389a4743d56919b6758b91"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:78864cc8f23dbee55be34cc1494632a7ba30263951b5b2e8fc8286b95845f82c"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_i686.whl", hash = "sha256:1e9679245e3ea6498494b3028b90c7b25dbb2abe65c7d07423ecfc2d6218ff7c"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:9413384f26b5d050b6978e6fcd0c1e7f0539be7a4f1a885061473c5deaa57221"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:294b7a598974b8e2c6123d19ef15de9abcd282b0fbbdbc4d23dfa812959a9e05"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:26dd201857d702bdf9d78c273cafcab5871dd29343748524695cecffa44a8d97"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:0f9332243355643d567697c3e3fa07330a1d1abf981611654a1f2bf2175612b7"}, + {file 
= "watchdog-5.0.3-py3-none-win32.whl", hash = "sha256:c66f80ee5b602a9c7ab66e3c9f36026590a0902db3aea414d59a2f55188c1f49"}, + {file = "watchdog-5.0.3-py3-none-win_amd64.whl", hash = "sha256:f00b4cf737f568be9665563347a910f8bdc76f88c2970121c86243c8cfdf90e9"}, + {file = "watchdog-5.0.3-py3-none-win_ia64.whl", hash = "sha256:49f4d36cb315c25ea0d946e018c01bb028048023b9e103d3d3943f58e109dd45"}, + {file = "watchdog-5.0.3.tar.gz", hash = "sha256:108f42a7f0345042a854d4d0ad0834b741d421330d5f575b81cb27b883500176"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[[package]] +name = "watchfiles" +version = "0.24.0" +description = "Simple, modern and high performance file watching and code reload in python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, + {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a"}, + {file = 
"watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188"}, + {file = "watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735"}, + {file = "watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8360f7314a070c30e4c976b183d1d8d1585a4a50c5cb603f431cebcbb4f66327"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:316449aefacf40147a9efaf3bd7c9bdd35aaba9ac5d708bd1eb5763c9a02bef5"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73bde715f940bea845a95247ea3e5eb17769ba1010efdc938ffcb967c634fa61"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3770e260b18e7f4e576edca4c0a639f704088602e0bc921c5c2e721e3acb8d15"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:aa0fd7248cf533c259e59dc593a60973a73e881162b1a2f73360547132742823"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d7a2e3b7f5703ffbd500dabdefcbc9eafeff4b9444bbdd5d83d79eedf8428fab"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d831ee0a50946d24a53821819b2327d5751b0c938b12c0653ea5be7dea9c82ec"}, + {file = "watchfiles-0.24.0-cp311-none-win32.whl", hash = "sha256:49d617df841a63b4445790a254013aea2120357ccacbed00253f9c2b5dc24e2d"}, + {file = "watchfiles-0.24.0-cp311-none-win_amd64.whl", hash = "sha256:d3dcb774e3568477275cc76554b5a565024b8ba3a0322f77c246bc7111c5bb9c"}, + {file = "watchfiles-0.24.0-cp311-none-win_arm64.whl", hash = "sha256:9301c689051a4857d5b10777da23fafb8e8e921bcf3abe6448a058d27fb67633"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f"}, + {file = 
"watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968"}, + {file = "watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444"}, + {file = "watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896"}, + {file = "watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18"}, + {file = "watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07"}, + {file = "watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ee82c98bed9d97cd2f53bdb035e619309a098ea53ce525833e26b93f673bc318"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd92bbaa2ecdb7864b7600dcdb6f2f1db6e0346ed425fbd01085be04c63f0b05"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f83df90191d67af5a831da3a33dd7628b02a95450e168785586ed51e6d28943c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fca9433a45f18b7c779d2bae7beeec4f740d28b788b117a48368d95a3233ed83"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b995bfa6bf01a9e09b884077a6d37070464b529d8682d7691c2d3b540d357a0c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed9aba6e01ff6f2e8285e5aa4154e2970068fe0fc0998c4380d0e6278222269b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5171ef898299c657685306d8e1478a45e9303ddcd8ac5fed5bd52ad4ae0b69b"}, + {file = 
"watchfiles-0.24.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4933a508d2f78099162da473841c652ad0de892719043d3f07cc83b33dfd9d91"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95cf3b95ea665ab03f5a54765fa41abf0529dbaf372c3b83d91ad2cfa695779b"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:01def80eb62bd5db99a798d5e1f5f940ca0a05986dcfae21d833af7a46f7ee22"}, + {file = "watchfiles-0.24.0-cp38-none-win32.whl", hash = "sha256:4d28cea3c976499475f5b7a2fec6b3a36208656963c1a856d328aeae056fc5c1"}, + {file = "watchfiles-0.24.0-cp38-none-win_amd64.whl", hash = "sha256:21ab23fdc1208086d99ad3f69c231ba265628014d4aed31d4e8746bd59e88cd1"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b665caeeda58625c3946ad7308fbd88a086ee51ccb706307e5b1fa91556ac886"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c51749f3e4e269231510da426ce4a44beb98db2dce9097225c338f815b05d4f"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b2509f08761f29a0fdad35f7e1638b8ab1adfa2666d41b794090361fb8b855"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a60e2bf9dc6afe7f743e7c9b149d1fdd6dbf35153c78fe3a14ae1a9aee3d98b"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7d9b87c4c55e3ea8881dfcbf6d61ea6775fffed1fedffaa60bd047d3c08c430"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78470906a6be5199524641f538bd2c56bb809cd4bf29a566a75051610bc982c3"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07cdef0c84c03375f4e24642ef8d8178e533596b229d32d2bbd69e5128ede02a"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d337193bbf3e45171c8025e291530fb7548a93c45253897cd764a6a71c937ed9"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ec39698c45b11d9694a1b635a70946a5bad066b593af863460a8e600f0dff1ca"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e28d91ef48eab0afb939fa446d8ebe77e2f7593f5f463fd2bb2b14132f95b6e"}, + {file = "watchfiles-0.24.0-cp39-none-win32.whl", hash = "sha256:7138eff8baa883aeaa074359daabb8b6c1e73ffe69d5accdc907d62e50b1c0da"}, + {file = "watchfiles-0.24.0-cp39-none-win_amd64.whl", hash = "sha256:b3ef2c69c655db63deb96b3c3e587084612f9b1fa983df5e0c3379d41307467f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:96619302d4374de5e2345b2b622dc481257a99431277662c30f606f3e22f42be"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:85d5f0c7771dcc7a26c7a27145059b6bb0ce06e4e751ed76cdf123d7039b60b5"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951088d12d339690a92cef2ec5d3cfd957692834c72ffd570ea76a6790222777"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fb58bcaa343fedc6a9e91f90195b20ccb3135447dc9e4e2570c3a39565853e"}, + {file = 
"watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "webcolors" +version = "24.8.0" +description = "A library for working with the color formats defined by HTML and CSS." +optional = false +python-versions = ">=3.8" +files = [ + {file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"}, + {file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"}, +] + +[package.extras] +docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] +tests = ["coverage[toml]"] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "websocket-client" +version = "1.8.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = 
"websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = "websockets" +version = "13.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", 
hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = 
"websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = 
"websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = 
"websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = 
"websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, +] + +[[package]] +name = "widgetsnbextension" +version = "4.0.13" +description = "Jupyter interactive widgets for Jupyter Notebook" +optional = false +python-versions = ">=3.7" +files = [ + {file = "widgetsnbextension-4.0.13-py3-none-any.whl", hash = "sha256:74b2692e8500525cc38c2b877236ba51d34541e6385eeed5aec15a70f88a6c71"}, + {file = "widgetsnbextension-4.0.13.tar.gz", hash = "sha256:ffcb67bc9febd10234a362795f643927f4e0c05d9342c727b65d2384f8feacb6"}, +] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = 
"sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "yarl" +version = "1.14.0" +description = "Yet another URL library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1bfc25aa6a7c99cf86564210f79a0b7d4484159c67e01232b116e445b3036547"}, + {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0cf21f46a15d445417de8fc89f2568852cf57fe8ca1ab3d19ddb24d45c0383ae"}, + {file = "yarl-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1dda53508df0de87b6e6b0a52d6718ff6c62a5aca8f5552748404963df639269"}, + {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:587c3cc59bc148a9b1c07a019346eda2549bc9f468acd2f9824d185749acf0a6"}, + {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3007a5b75cb50140708420fe688c393e71139324df599434633019314ceb8b59"}, + {file = 
"yarl-1.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06ff23462398333c78b6f4f8d3d70410d657a471c2c5bbe6086133be43fc8f1a"}, + {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689a99a42ee4583fcb0d3a67a0204664aa1539684aed72bdafcbd505197a91c4"}, + {file = "yarl-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0547ab1e9345dc468cac8368d88ea4c5bd473ebc1d8d755347d7401982b5dd8"}, + {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:742aef0a99844faaac200564ea6f5e08facb285d37ea18bd1a5acf2771f3255a"}, + {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:176110bff341b6730f64a1eb3a7070e12b373cf1c910a9337e7c3240497db76f"}, + {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46a9772a1efa93f9cd170ad33101c1817c77e0e9914d4fe33e2da299d7cf0f9b"}, + {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ee2c68e4f2dd1b1c15b849ba1c96fac105fca6ffdb7c1e8be51da6fabbdeafb9"}, + {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:047b258e00b99091b6f90355521f026238c63bd76dcf996d93527bb13320eefd"}, + {file = "yarl-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0aa92e3e30a04f9462a25077db689c4ac5ea9ab6cc68a2e563881b987d42f16d"}, + {file = "yarl-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9baec588f015d0ee564057aa7574313c53a530662ffad930b7886becc85abdf"}, + {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:07f9eaf57719d6721ab15805d85f4b01a5b509a0868d7320134371bcb652152d"}, + {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c14b504a74e58e2deb0378b3eca10f3d076635c100f45b113c18c770b4a47a50"}, + {file = "yarl-1.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16a682a127930f3fc4e42583becca6049e1d7214bcad23520c590edd741d2114"}, + {file = 
"yarl-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bedd2be05f48af19f0f2e9e1353921ce0c83f4a1c9e8556ecdcf1f1eae4892"}, + {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3ab950f8814f3b7b5e3eebc117986f817ec933676f68f0a6c5b2137dd7c9c69"}, + {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b693c63e7e64b524f54aa4888403c680342d1ad0d97be1707c531584d6aeeb4f"}, + {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cb3e40eaa98489f1e2e8b29f5ad02ee1ee40d6ce6b88d50cf0f205de1d9d2c"}, + {file = "yarl-1.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f24f08b6c9b9818fd80612c97857d28f9779f0d1211653ece9844fc7b414df2"}, + {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29a84a46ec3ebae7a1c024c055612b11e9363a8a23238b3e905552d77a2bc51b"}, + {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5cd5dad8366e0168e0fd23d10705a603790484a6dbb9eb272b33673b8f2cce72"}, + {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a152751af7ef7b5d5fa6d215756e508dd05eb07d0cf2ba51f3e740076aa74373"}, + {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3d569f877ed9a708e4c71a2d13d2940cb0791da309f70bd970ac1a5c088a0a92"}, + {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a615cad11ec3428020fb3c5a88d85ce1b5c69fd66e9fcb91a7daa5e855325dd"}, + {file = "yarl-1.14.0-cp311-cp311-win32.whl", hash = "sha256:bab03192091681d54e8225c53f270b0517637915d9297028409a2a5114ff4634"}, + {file = "yarl-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:985623575e5c4ea763056ffe0e2d63836f771a8c294b3de06d09480538316b13"}, + {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fc2c80bc87fba076e6cbb926216c27fba274dae7100a7b9a0983b53132dd99f2"}, + 
{file = "yarl-1.14.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:55c144d363ad4626ca744556c049c94e2b95096041ac87098bb363dcc8635e8d"}, + {file = "yarl-1.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b03384eed107dbeb5f625a99dc3a7de8be04fc8480c9ad42fccbc73434170b20"}, + {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72a0d746d38cb299b79ce3d4d60ba0892c84bbc905d0d49c13df5bace1b65f8"}, + {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8648180b34faaea4aa5b5ca7e871d9eb1277033fa439693855cf0ea9195f85f1"}, + {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9557c9322aaa33174d285b0c1961fb32499d65ad1866155b7845edc876c3c835"}, + {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f50eb3837012a937a2b649ec872b66ba9541ad9d6f103ddcafb8231cfcafd22"}, + {file = "yarl-1.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8892fa575ac9b1b25fae7b221bc4792a273877b9b56a99ee2d8d03eeb3dbb1d2"}, + {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6a2c5c5bb2556dfbfffffc2bcfb9c235fd2b566d5006dfb2a37afc7e3278a07"}, + {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab3abc0b78a5dfaa4795a6afbe7b282b6aa88d81cf8c1bb5e394993d7cae3457"}, + {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:47eede5d11d669ab3759b63afb70d28d5328c14744b8edba3323e27dc52d298d"}, + {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe4d2536c827f508348d7b40c08767e8c7071614250927233bf0c92170451c0a"}, + {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0fd7b941dd1b00b5f0acb97455fea2c4b7aac2dd31ea43fb9d155e9bc7b78664"}, + {file = "yarl-1.14.0-cp312-cp312-win32.whl", hash = 
"sha256:99ff3744f5fe48288be6bc402533b38e89749623a43208e1d57091fc96b783b9"}, + {file = "yarl-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ca3894e9e9f72da93544f64988d9c052254a338a9f855165f37f51edb6591de"}, + {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d02d700705d67e09e1f57681f758f0b9d4412eeb70b2eb8d96ca6200b486db3"}, + {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:30600ba5db60f7c0820ef38a2568bb7379e1418ecc947a0f76fd8b2ff4257a97"}, + {file = "yarl-1.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e85d86527baebb41a214cc3b45c17177177d900a2ad5783dbe6f291642d4906f"}, + {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37001e5d4621cef710c8dc1429ca04e189e572f128ab12312eab4e04cf007132"}, + {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4f4547944d4f5cfcdc03f3f097d6f05bbbc915eaaf80a2ee120d0e756de377d"}, + {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ff4c819757f9bdb35de049a509814d6ce851fe26f06eb95a392a5640052482"}, + {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68ac1a09392ed6e3fd14be880d39b951d7b981fd135416db7d18a6208c536561"}, + {file = "yarl-1.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96952f642ac69075e44c7d0284528938fdff39422a1d90d3e45ce40b72e5e2d9"}, + {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a56fbe3d7f3bce1d060ea18d2413a2ca9ca814eea7cedc4d247b5f338d54844e"}, + {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7e2637d75e92763d1322cb5041573279ec43a80c0f7fbbd2d64f5aee98447b17"}, + {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9abe80ae2c9d37c17599557b712e6515f4100a80efb2cda15f5f070306477cd2"}, + {file = 
"yarl-1.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:217a782020b875538eebf3948fac3a7f9bbbd0fd9bf8538f7c2ad7489e80f4e8"}, + {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9cfef3f14f75bf6aba73a76caf61f9d00865912a04a4393c468a7ce0981b519"}, + {file = "yarl-1.14.0-cp313-cp313-win32.whl", hash = "sha256:d8361c7d04e6a264481f0b802e395f647cd3f8bbe27acfa7c12049efea675bd1"}, + {file = "yarl-1.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:bc24f968b82455f336b79bf37dbb243b7d76cd40897489888d663d4e028f5069"}, + {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:91d875f75fabf76b3018c5f196bf3d308ed2b49ddcb46c1576d6b075754a1393"}, + {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4009def9be3a7e5175db20aa2d7307ecd00bbf50f7f0f989300710eee1d0b0b9"}, + {file = "yarl-1.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:582cedde49603f139be572252a318b30dc41039bc0b8165f070f279e5d12187f"}, + {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbd9ff43a04f8ffe8a959a944c2dca10d22f5f99fc6a459f49c3ebfb409309d9"}, + {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f805e37ed16cc212fdc538a608422d7517e7faf539bedea4fe69425bc55d76"}, + {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95e16e9eaa2d7f5d87421b8fe694dd71606aa61d74b824c8d17fc85cc51983d1"}, + {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:816d24f584edefcc5ca63428f0b38fee00b39fe64e3c5e558f895a18983efe96"}, + {file = "yarl-1.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd2660c01367eb3ef081b8fa0a5da7fe767f9427aa82023a961a5f28f0d4af6c"}, + {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:94b2bb9bcfd5be9d27004ea4398fb640373dd0c1a9e219084f42c08f77a720ab"}, + {file = 
"yarl-1.14.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c2089a9afef887664115f7fa6d3c0edd6454adaca5488dba836ca91f60401075"}, + {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2192f718db4a8509f63dd6d950f143279211fa7e6a2c612edc17d85bf043d36e"}, + {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:8385ab36bf812e9d37cf7613999a87715f27ef67a53f0687d28c44b819df7cb0"}, + {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b4c1ecba93e7826dc71ddba75fb7740cdb52e7bd0be9f03136b83f54e6a1f511"}, + {file = "yarl-1.14.0-cp38-cp38-win32.whl", hash = "sha256:e749af6c912a7bb441d105c50c1a3da720474e8acb91c89350080dd600228f0e"}, + {file = "yarl-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:147e36331f6f63e08a14640acf12369e041e0751bb70d9362df68c2d9dcf0c87"}, + {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a9f917966d27f7ce30039fe8d900f913c5304134096554fd9bea0774bcda6d1"}, + {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a2f8fb7f944bcdfecd4e8d855f84c703804a594da5123dd206f75036e536d4d"}, + {file = "yarl-1.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f4e475f29a9122f908d0f1f706e1f2fc3656536ffd21014ff8a6f2e1b14d1d8"}, + {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8089d4634d8fa2b1806ce44fefa4979b1ab2c12c0bc7ef3dfa45c8a374811348"}, + {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b16f6c75cffc2dc0616ea295abb0e1967601bd1fb1e0af6a1de1c6c887f3439"}, + {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498b3c55087b9d762636bca9b45f60d37e51d24341786dc01b81253f9552a607"}, + {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3f8bfc1db82589ef965ed234b87de30d140db8b6dc50ada9e33951ccd8ec07a"}, + {file = 
"yarl-1.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:625f207b1799e95e7c823f42f473c1e9dbfb6192bd56bba8695656d92be4535f"}, + {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:781e2495e408a81e4eaeedeb41ba32b63b1980dddf8b60dbbeff6036bcd35049"}, + {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:659603d26d40dd4463200df9bfbc339fbfaed3fe32e5c432fe1dc2b5d4aa94b4"}, + {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4e0d45ebf975634468682c8bec021618b3ad52c37619e5c938f8f831fa1ac5c0"}, + {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a2e4725a08cb2b4794db09e350c86dee18202bb8286527210e13a1514dc9a59a"}, + {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:19268b4fec1d7760134f2de46ef2608c2920134fb1fa61e451f679e41356dc55"}, + {file = "yarl-1.14.0-cp39-cp39-win32.whl", hash = "sha256:337912bcdcf193ade64b9aae5a4017a0a1950caf8ca140362e361543c6773f21"}, + {file = "yarl-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:b6d0147574ce2e7b812c989e50fa72bbc5338045411a836bd066ce5fc8ac0bce"}, + {file = "yarl-1.14.0-py3-none-any.whl", hash = "sha256:c8ed4034f0765f8861620c1f2f2364d2e58520ea288497084dae880424fc0d9f"}, + {file = "yarl-1.14.0.tar.gz", hash = "sha256:88c7d9d58aab0724b979ab5617330acb1c7030b79379c8138c1c8c94e121d1b3"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.0" + +[[package]] +name = "zipp" +version = "3.20.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] 
+cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "f3b44fd0947cbca385b2474cd6209435ffc35ff183eb0a05aa621d563860a151" diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml new file mode 100644 index 0000000000..4808cac87e --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml @@ -0,0 +1,108 @@ +[tool.poetry] +name = "starter-app-sample" +version = "0.1.0" +description = "" +authors = ["Your Name "] +packages = [ + { include = "app" }, +] + +[tool.poetry.dependencies] +python = "^3.10" +traceloop-sdk = "^0.30.1" +langchain-google-vertexai = "2.0.3" +opentelemetry-exporter-gcp-trace = "^1.6.0" +opentelemetry-sdk = "^1.25.0" +google-cloud-logging = "^3.10.0" +langchain = "^0.3.0" +google-cloud-aiplatform = {extras = ["rapid-evaluation"], version = "^1.59.0"} +scikit-learn = "^1.5.0" +fastapi = "0.110.3" +langchain-google-community = {extras = ["vertexaisearch"], version = "^2.0.0"} +pypdf = "^4.3.1" +grpcio = "1.64.1" +langgraph = "^0.2.21" +uvicorn = {extras = ["standard"], version = "^0.30.5"} +immutabledict = "^4.2.0" +types-pyyaml = "^6.0.12.20240917" +types-requests = "^2.32.0.20240914" +langchain-core = "^0.3.9" + + +[tool.poetry.group.dev.dependencies] +pytest = "^8.3.2" +pytest-asyncio = "^0.23.8" +nest-asyncio = "^1.6.0" + + +[tool.poetry.group.streamlit] +optional = true + +[tool.poetry.group.streamlit.dependencies] +streamlit = "^1.35.0" +streamlit-extras = "^0.4.3" +extra-streamlit-components = "^0.1.71" +streamlit-feedback = "^0.1.3" + + 
+[tool.poetry.group.jupyter] +optional = true + +[tool.poetry.group.jupyter.dependencies] +jupyter = "^1.0.0" + + +[tool.poetry.group.lint] +optional = true + +[tool.poetry.group.lint.dependencies] +flake8 = "^7.1.1" +flake8-pyproject = "^1.2.3" +mypy = "^1" +codespell = "^2.2.0" +black = "^24.8.0" + + +[tool.mypy] +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +no_implicit_optional = true +check_untyped_defs = true +disallow_subclassing_any = true +warn_incomplete_stub = true +warn_redundant_casts = true +warn_unused_ignores = true +warn_unreachable = true +follow_imports = "silent" +ignore_missing_imports = true +explicit_package_bases = true +disable_error_code = ["misc", "no-untyped-call", "no-any-return"] + +[tool.codespell] +ignore-words-list = "rouge" +skip = "./locust_env/*,poetry.lock" + +[tool.flake8] +ignore = ["E501", "W503"] + +[tool.pylint] +disable = [ + "E0401", + "C0301", + "R0903", + "R1710", + "C0114", + "R0915", + "W1514", + "W1203", + "C0411", +] + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = "." \ No newline at end of file diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/side_bar.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/side_bar.py new file mode 100644 index 0000000000..ab798af500 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/side_bar.py @@ -0,0 +1,171 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=W0201, E0611 + +import os +from typing import Any +import uuid + +from utils.chat_utils import save_chat +from utils.multimodal_utils import ( + HELP_GCS_CHECKBOX, + HELP_MESSAGE_MULTIMODALITY, + upload_files_to_gcs, +) + +EMPTY_CHAT_NAME = "Empty chat" +NUM_CHAT_IN_RECENT = 3 +DEFAULT_BASE_URL = "http://localhost:8000/" + + +class SideBar: + """Manages the sidebar components of the Streamlit application.""" + + def __init__(self, st: Any) -> None: + """ + Initialize the SideBar. + + Args: + st (Any): The Streamlit object for rendering UI components. + """ + self.st = st + + def init_side_bar(self) -> None: + """Initialize and render the sidebar components.""" + with self.st.sidebar: + self.url_input_field = self.st.text_input( + label="Service URL", + value=os.environ.get("SERVICE_URL", DEFAULT_BASE_URL), + ) + self.should_authenticate_request = self.st.checkbox( + label="Authenticate request", + value=False, + help="If checked, any request to the server will contain an" + "Identity token to allow authentication. 
" + "See the Cloud Run documentation to know more about authentication:" + "https://cloud.google.com/run/docs/authenticating/service-to-service", + ) + col1, col2, col3 = self.st.columns(3) + with col1: + if self.st.button("+ New chat"): + if ( + len( + self.st.session_state.user_chats[ + self.st.session_state["session_id"] + ]["messages"] + ) + > 0 + ): + self.st.session_state.run_id = None + + self.st.session_state["session_id"] = str(uuid.uuid4()) + self.st.session_state.session_db.get_session( + session_id=self.st.session_state["session_id"], + ) + self.st.session_state.user_chats[ + self.st.session_state["session_id"] + ] = { + "title": EMPTY_CHAT_NAME, + "messages": [], + } + + with col2: + if self.st.button("Delete chat"): + self.st.session_state.run_id = None + self.st.session_state.session_db.clear() + self.st.session_state.user_chats.pop( + self.st.session_state["session_id"] + ) + if len(self.st.session_state.user_chats) > 0: + chat_id = list(self.st.session_state.user_chats.keys())[0] + self.st.session_state["session_id"] = chat_id + self.st.session_state.session_db.get_session( + session_id=self.st.session_state["session_id"], + ) + else: + self.st.session_state["session_id"] = str(uuid.uuid4()) + self.st.session_state.user_chats[ + self.st.session_state["session_id"] + ] = { + "title": EMPTY_CHAT_NAME, + "messages": [], + } + with col3: + if self.st.button("Save chat"): + save_chat(self.st) + + self.st.subheader("Recent") # Style the heading + + all_chats = list(reversed(self.st.session_state.user_chats.items())) + for chat_id, chat in all_chats[:NUM_CHAT_IN_RECENT]: + if self.st.button(chat["title"], key=chat_id): + self.st.session_state.run_id = None + self.st.session_state["session_id"] = chat_id + self.st.session_state.session_db.get_session( + session_id=self.st.session_state["session_id"], + ) + + with self.st.expander("Other chats"): + for chat_id, chat in all_chats[NUM_CHAT_IN_RECENT:]: + if self.st.button(chat["title"], key=chat_id): + 
self.st.session_state.run_id = None + self.st.session_state["session_id"] = chat_id + self.st.session_state.session_db.get_session( + session_id=self.st.session_state["session_id"], + ) + + self.st.divider() + self.st.header("Upload files from local") + bucket_name = self.st.text_input( + label="GCS Bucket for upload", + value=os.environ.get("BUCKET_NAME", "gs://your-bucket-name"), + ) + if "checkbox_state" not in self.st.session_state: + self.st.session_state.checkbox_state = True + + self.st.session_state.checkbox_state = self.st.checkbox( + "Upload to GCS first (suggested)", value=False, help=HELP_GCS_CHECKBOX + ) + + self.uploaded_files = self.st.file_uploader( + label="Send files from local", + accept_multiple_files=True, + key=f"uploader_images_{self.st.session_state.uploader_key}", + type=[ + "png", + "jpg", + "jpeg", + "txt", + "docx", + "pdf", + "rtf", + "csv", + "tsv", + "xlsx", + ], + ) + if self.uploaded_files and self.st.session_state.checkbox_state: + upload_files_to_gcs(self.st, bucket_name, self.uploaded_files) + + self.st.divider() + + self.st.header("Upload files from GCS") + self.gcs_uris = self.st.text_area( + "GCS uris (comma-separated)", + value=self.st.session_state["gcs_uris_to_be_sent"], + key=f"upload_text_area_{self.st.session_state.uploader_key}", + help=HELP_MESSAGE_MULTIMODALITY, + ) + + self.st.caption(f"Note: {HELP_MESSAGE_MULTIMODALITY}") diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/streamlit_app.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/streamlit_app.py new file mode 100644 index 0000000000..d9c4af5aad --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/streamlit_app.py @@ -0,0 +1,253 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=E0611 + +from functools import partial +import json +from typing import Any, Dict, List +import uuid + +from langchain_core.messages import HumanMessage +from side_bar import SideBar +import streamlit as st +from streamlit_feedback import streamlit_feedback +from style.app_markdown import MARKDOWN_STR +from utils.local_chat_history import LocalChatMessageHistory +from utils.message_editing import MessageEditing +from utils.multimodal_utils import format_content, get_parts_from_files +from utils.stream_handler import Client, StreamHandler, get_chain_response + +USER = "my_user" +EMPTY_CHAT_NAME = "Empty chat" + + +def setup_page() -> None: + """Configure the Streamlit page settings.""" + st.set_page_config( + page_title="Playground", + layout="wide", + initial_sidebar_state="auto", + menu_items=None, + ) + st.title("Playground") + st.markdown(MARKDOWN_STR, unsafe_allow_html=True) + + +def initialize_session_state() -> None: + """Initialize the session state with default values.""" + if "user_chats" not in st.session_state: + st.session_state["session_id"] = str(uuid.uuid4()) + st.session_state.uploader_key = 0 + st.session_state.run_id = None + st.session_state.user_id = USER + st.session_state["gcs_uris_to_be_sent"] = "" + st.session_state.modified_prompt = None + st.session_state.session_db = LocalChatMessageHistory( + session_id=st.session_state["session_id"], + user_id=st.session_state["user_id"], + ) + st.session_state.user_chats = ( + st.session_state.session_db.get_all_conversations() + ) + 
st.session_state.user_chats[st.session_state["session_id"]] = { + "title": EMPTY_CHAT_NAME, + "messages": [], + } + + +def display_messages() -> None: + """Display all messages in the current chat session.""" + messages = st.session_state.user_chats[st.session_state["session_id"]]["messages"] + tool_call_input = None + for i, message in enumerate(messages): + if message["type"] in ["ai", "human"] and message["content"]: + display_chat_message(message, i) + elif "tool_calls" in message and message["tool_calls"]: + tool_call_input = handle_tool_call(message) + elif message["type"] == "tool" and tool_call_input is not None: + display_tool_output(tool_call_input, message) + tool_call_input = None + else: + st.error(f"Unexpected message type: {message['type']}") + st.write("Full messages list:", messages) + raise ValueError(f"Unexpected message type: {message['type']}") + + +def display_chat_message(message: Dict[str, Any], index: int) -> None: + """Display a single chat message with edit, refresh, and delete options.""" + chat_message = st.chat_message(message["type"]) + with chat_message: + st.markdown(format_content(message["content"]), unsafe_allow_html=True) + col1, col2, col3 = st.columns([2, 2, 94]) + display_message_buttons(message, index, col1, col2, col3) + + +def display_message_buttons( + message: Dict[str, Any], index: int, col1: Any, col2: Any, col3: Any +) -> None: + """Display edit, refresh, and delete buttons for a chat message.""" + edit_button = f"{index}_edit" + refresh_button = f"{index}_refresh" + delete_button = f"{index}_delete" + content = ( + message["content"] + if isinstance(message["content"], str) + else message["content"][-1]["text"] + ) + + with col1: + st.button(label="✎", key=edit_button, type="primary") + if message["type"] == "human": + with col2: + st.button( + label="⟳", + key=refresh_button, + type="primary", + on_click=partial(MessageEditing.refresh_message, st, index, content), + ) + with col3: + st.button( + label="X", + 
key=delete_button, + type="primary", + on_click=partial(MessageEditing.delete_message, st, index), + ) + + if st.session_state[edit_button]: + st.text_area( + "Edit your message:", + value=content, + key=f"edit_box_{index}", + on_change=partial(MessageEditing.edit_message, st, index, message["type"]), + ) + + +def handle_tool_call(message: Dict[str, Any]) -> Dict[str, Any]: + """Process a tool call message and return the first tool call.""" + if len(message["tool_calls"]) > 1: + raise ValueError("Expected only one tool call, but found multiple.") + return message["tool_calls"][0] + + +def display_tool_output( + tool_call_input: Dict[str, Any], tool_call_output: Dict[str, Any] +) -> None: + """Display the input and output of a tool call in an expander.""" + tool_expander = st.expander(label="Tool Calls:", expanded=False) + with tool_expander: + msg = ( + f"\n\nEnding tool: `{tool_call_input['name']}` with\n **args:**\n" + f"```\n{json.dumps(tool_call_input['args'], indent=2)}\n```\n" + f"\n\n**output:**\n " + f"```\n{json.dumps(tool_call_output, indent=2)}\n```" + ) + st.markdown(msg, unsafe_allow_html=True) + + +def handle_user_input(side_bar: SideBar) -> None: + """Process user input, generate AI response, and update chat history.""" + prompt = st.chat_input() or st.session_state.modified_prompt + if prompt: + st.session_state.modified_prompt = None + parts = get_parts_from_files( + upload_gcs_checkbox=st.session_state.checkbox_state, + uploaded_files=side_bar.uploaded_files, + gcs_uris=side_bar.gcs_uris, + ) + st.session_state["gcs_uris_to_be_sent"] = "" + parts.append({"type": "text", "text": prompt}) + st.session_state.user_chats[st.session_state["session_id"]]["messages"].append( + HumanMessage(content=parts).model_dump() + ) + + display_user_input(parts) + generate_ai_response( + url_input_field=side_bar.url_input_field, + should_authenticate_request=side_bar.should_authenticate_request, + ) + update_chat_title() + if len(parts) > 1: + 
st.session_state.uploader_key += 1 + st.rerun() + + +def display_user_input(parts: List[Dict[str, Any]]) -> None: + """Display the user's input in the chat interface.""" + human_message = st.chat_message("human") + with human_message: + existing_user_input = format_content(parts) + st.markdown(existing_user_input, unsafe_allow_html=True) + + +def generate_ai_response( + url_input_field: str, should_authenticate_request: bool +) -> None: + """Generate and display the AI's response to the user's input.""" + ai_message = st.chat_message("ai") + with ai_message: + status = st.status("Generating answer🤖") + stream_handler = StreamHandler(st=st) + client = Client( + url=url_input_field, authenticate_request=should_authenticate_request + ) + get_chain_response(st=st, client=client, stream_handler=stream_handler) + status.update(label="Finished!", state="complete", expanded=False) + + +def update_chat_title() -> None: + """Update the chat title if it's currently empty.""" + if ( + st.session_state.user_chats[st.session_state["session_id"]]["title"] + == EMPTY_CHAT_NAME + ): + st.session_state.session_db.set_title( + st.session_state.user_chats[st.session_state["session_id"]] + ) + st.session_state.session_db.upsert_session( + st.session_state.user_chats[st.session_state["session_id"]] + ) + + +def display_feedback(side_bar: SideBar) -> None: + """Display a feedback component and log the feedback if provided.""" + if st.session_state.run_id is not None: + feedback = streamlit_feedback( + feedback_type="faces", + optional_text_label="[Optional] Please provide an explanation", + key=f"feedback-{st.session_state.run_id}", + ) + if feedback is not None: + client = Client( + url=side_bar.url_input_field, + authenticate_request=side_bar.should_authenticate_request, + ) + client.log_feedback( + feedback_dict=feedback, + run_id=st.session_state.run_id, + ) + + +def main() -> None: + """Main function to set up and run the Streamlit app.""" + setup_page() + initialize_session_state() 
+ side_bar = SideBar(st=st) + side_bar.init_side_bar() + display_messages() + handle_user_input(side_bar=side_bar) + display_feedback(side_bar=side_bar) + + +if __name__ == "__main__": + main() diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/style/app_markdown.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/style/app_markdown.py new file mode 100644 index 0000000000..2cfe527854 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/style/app_markdown.py @@ -0,0 +1,37 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +MARKDOWN_STR = """ + +""" diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/chat_utils.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/chat_utils.py new file mode 100644 index 0000000000..d6c2a2254b --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/chat_utils.py @@ -0,0 +1,69 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=R0801 + +import os +from pathlib import Path +from typing import Any, Dict, List, Union + +import yaml + +SAVED_CHAT_PATH = str(os.getcwd()) + "/.saved_chats" + + +def clean_text(text: str) -> str: + """Preprocess the input text by removing leading and trailing newlines.""" + if not text: + return text + + if text.startswith("\n"): + text = text[1:] + if text.endswith("\n"): + text = text[:-1] + return text + + +def sanitize_messages( + messages: List[Dict[str, Union[str, List[Dict[str, str]]]]] +) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]: + """Preprocess and fix the content of messages.""" + for message in messages: + if isinstance(message["content"], list): + for part in message["content"]: + if part["type"] == "text": + part["text"] = clean_text(part["text"]) + else: + message["content"] = clean_text(message["content"]) + return messages + + +def save_chat(st: Any) -> None: + """Save the current chat session to a YAML file.""" + Path(SAVED_CHAT_PATH).mkdir(parents=True, exist_ok=True) + session_id = st.session_state["session_id"] + session = st.session_state.user_chats[session_id] + messages = session.get("messages", []) + if len(messages) > 0: + session["messages"] = sanitize_messages(session["messages"]) + filename = f"{session_id}.yaml" + with open(Path(SAVED_CHAT_PATH) / filename, "w") as file: + yaml.dump( + [session], + file, + allow_unicode=True, + default_flow_style=False, + encoding="utf-8", + ) + st.toast(f"Chat saved to path: ↓ {Path(SAVED_CHAT_PATH) / filename}") diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/local_chat_history.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/local_chat_history.py new file mode 100644 index 0000000000..3f1581eebc --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/local_chat_history.py @@ -0,0 +1,121 @@ 
+# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=E0611 + +from datetime import datetime +import os +from typing import Dict + +from langchain_core.chat_history import BaseChatMessageHistory +from utils.title_summary import chain_title +import yaml + + +class LocalChatMessageHistory(BaseChatMessageHistory): + """Manages local storage and retrieval of chat message history.""" + + def __init__( + self, + user_id: str, + session_id: str = "default", + base_dir: str = ".streamlit_chats", + ) -> None: + self.user_id = user_id + self.session_id = session_id + self.base_dir = base_dir + self.user_dir = os.path.join(self.base_dir, self.user_id) + self.session_file = os.path.join(self.user_dir, f"{session_id}.yaml") + + os.makedirs(self.user_dir, exist_ok=True) + + def get_session(self, session_id: str) -> None: + """Updates the session ID and file path for the current session.""" + self.session_id = session_id + self.session_file = os.path.join(self.user_dir, f"{session_id}.yaml") + + def get_all_conversations(self) -> Dict[str, Dict]: + """Retrieves all conversations for the current user.""" + conversations = {} + for filename in os.listdir(self.user_dir): + if filename.endswith(".yaml"): + file_path = os.path.join(self.user_dir, filename) + with open(file_path, "r") as f: + conversation = yaml.safe_load(f) + if not isinstance(conversation, list) or len(conversation) > 1: + raise ValueError( + f"""Invalid format in {file_path}. 
+ YAML file can only contain one conversation with the following + structure. + - messages: + - content: [message text] + - type: (human or ai)""" + ) + conversation = conversation[0] + if "title" not in conversation: + conversation["title"] = filename + conversations[filename[:-5]] = conversation + return dict( + sorted(conversations.items(), key=lambda x: x[1].get("update_time", "")) + ) + + def upsert_session(self, session: Dict) -> None: + """Updates or inserts a session into the local storage.""" + session["update_time"] = datetime.now().isoformat() + with open(self.session_file, "w") as f: + yaml.dump( + [session], + f, + allow_unicode=True, + default_flow_style=False, + encoding="utf-8", + ) + + def set_title(self, session: Dict) -> None: + """ + Set the title for the given session. + + This method generates a title for the session based on its messages. + If the session has messages, it appends a special message to prompt + for title creation, generates the title using a title chain, and + updates the session with the new title. + + Args: + session (dict): A dictionary containing session information, + including messages. 
+ + Returns: + None + """ + if session["messages"]: + messages = session["messages"] + [ + { + "type": "human", + "content": "End of conversation - Create one single title", + } + ] + # Remove the tool calls from conversation + messages = [ + msg + for msg in messages + if msg["type"] in ("ai", "human") and msg["content"] + ] + + title = chain_title.invoke(messages).content.strip() + session["title"] = title + self.upsert_session(session) + + def clear(self) -> None: + """Removes the current session file if it exists.""" + if os.path.exists(self.session_file): + os.remove(self.session_file) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/message_editing.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/message_editing.py new file mode 100644 index 0000000000..5e913dc95f --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/message_editing.py @@ -0,0 +1,58 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# fmt: off + +from typing import Any + + +class MessageEditing: + """Provides methods for editing, refreshing, and deleting chat messages.""" + + @staticmethod + def edit_message(st: Any, button_idx: int, message_type: str) -> None: + """Edit a message in the chat history.""" + button_id = f"edit_box_{button_idx}" + if message_type == "human": + messages = st.session_state.user_chats[st.session_state["session_id"]][ + "messages" + ] + st.session_state.user_chats[st.session_state["session_id"]][ + "messages" + ] = messages[:button_idx] + st.session_state.modified_prompt = st.session_state[button_id] + else: + st.session_state.user_chats[st.session_state["session_id"]]["messages"][ + button_idx + ]["content"] = st.session_state[button_id] + + @staticmethod + def refresh_message(st: Any, button_idx: int, content: str) -> None: + """Refresh a message in the chat history.""" + messages = st.session_state.user_chats[st.session_state["session_id"]][ + "messages" + ] + st.session_state.user_chats[st.session_state["session_id"]][ + "messages" + ] = messages[:button_idx] + st.session_state.modified_prompt = content + + @staticmethod + def delete_message(st: Any, button_idx: int) -> None: + """Delete a message from the chat history.""" + messages = st.session_state.user_chats[st.session_state["session_id"]][ + "messages" + ] + st.session_state.user_chats[st.session_state["session_id"]][ + "messages" + ] = messages[:button_idx] diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/multimodal_utils.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/multimodal_utils.py new file mode 100644 index 0000000000..4de59d96e6 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/multimodal_utils.py @@ -0,0 +1,218 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=W0718 + +import base64 +from typing import Any, Dict, List, Optional, Union +from urllib.parse import quote + +from google.cloud import storage + +HELP_MESSAGE_MULTIMODALITY = ( + "For Gemini models to access the URIs you provide, store them in " + "Google Cloud Storage buckets within the same project used by Gemini." +) + +HELP_GCS_CHECKBOX = ( + "Enabling GCS upload will increase the app observability by avoiding" + " forwarding and logging large byte strings within the app." +) + + +def format_content(content: Union[str, List[Dict[str, Any]]]) -> str: + """Formats content as a string, handling both text and multimedia inputs.""" + if isinstance(content, str): + return content + if len(content) == 1 and content[0]["type"] == "text": + return content[0]["text"] + markdown = """Media: +""" + text = "" + for part in content: + if part["type"] == "text": + text = part["text"] + # Local Images: + if part["type"] == "image_url": + image_url = part["image_url"]["url"] + image_markdown = f'' + markdown = ( + markdown + + f""" +- {image_markdown} +""" + ) + if part["type"] == "media": + # Local other media + if "data" in part: + markdown = markdown + f"- Local media: {part['file_name']}\n" + # From GCS: + if "file_uri" in part: + # GCS images + if "image" in part["mime_type"]: + image_url = gs_uri_to_https_url(part["file_uri"]) + image_markdown = f'' + markdown = ( + markdown + + f""" +- {image_markdown} +""" + ) + # GCS other media + else: + image_url = gs_uri_to_https_url(part["file_uri"]) + markdown = ( + markdown + f"- Remote media: " + 
f"[{part['file_uri']}]({image_url})\n" + ) + markdown = ( + markdown + + f""" + +{text}""" + ) + return markdown + + +def get_gcs_blob_mime_type(gcs_uri: str) -> Optional[str]: + """Fetches the MIME type (content type) of a Google Cloud Storage blob. + + Args: + gcs_uri (str): The GCS URI of the blob in the format "gs://bucket-name/object-name". + + Returns: + str: The MIME type of the blob (e.g., "image/jpeg", "text/plain") if found, + or None if the blob does not exist or an error occurs. + """ + storage_client = storage.Client() + + try: + bucket_name, object_name = gcs_uri.replace("gs://", "").split("/", 1) + + bucket = storage_client.bucket(bucket_name) + blob = bucket.blob(object_name) + blob.reload() + return blob.content_type + except Exception as e: + print(f"Error retrieving MIME type for {gcs_uri}: {e}") + return None # Indicate failure + + +def get_parts_from_files( + upload_gcs_checkbox: bool, uploaded_files: List[Any], gcs_uris: str +) -> List[Dict[str, Any]]: + """Processes uploaded files and GCS URIs to create a list of content parts.""" + parts = [] + # read from local directly + if not upload_gcs_checkbox: + for uploaded_file in uploaded_files: + im_bytes = uploaded_file.read() + if "image" in uploaded_file.type: + content = { + "type": "image_url", + "image_url": { + "url": f"data:{uploaded_file.type};base64," + f"{base64.b64encode(im_bytes).decode('utf-8')}" + }, + "file_name": uploaded_file.name, + } + else: + content = { + "type": "media", + "data": base64.b64encode(im_bytes).decode("utf-8"), + "file_name": uploaded_file.name, + "mime_type": uploaded_file.type, + } + + parts.append(content) + if gcs_uris != "": + for uri in gcs_uris.split(","): + content = { + "type": "media", + "file_uri": uri, + "mime_type": get_gcs_blob_mime_type(uri), + } + parts.append(content) + return parts + + +def upload_bytes_to_gcs( + bucket_name: str, + blob_name: str, + file_bytes: bytes, + content_type: Optional[str] = None, +) -> str: + """Uploads a bytes object 
to Google Cloud Storage and returns the GCS URI. + + Args: + bucket_name: The name of the GCS bucket. + blob_name: The desired name for the uploaded file in GCS. + file_bytes: The file's content as a bytes object. + content_type (optional): The MIME type of the file (e.g., "image/png"). + If not provided, GCS will try to infer it. + + Returns: + str: The GCS URI (gs://bucket_name/blob_name) of the uploaded file. + + Raises: + GoogleCloudError: If there's an issue with the GCS operation. + """ + storage_client = storage.Client() + bucket = storage_client.bucket(bucket_name) + blob = bucket.blob(blob_name) + blob.upload_from_string(data=file_bytes, content_type=content_type) + # Construct and return the GCS URI + gcs_uri = f"gs://{bucket_name}/{blob_name}" + return gcs_uri + + +def gs_uri_to_https_url(gs_uri: str) -> str: + """Converts a GS URI to an HTTPS URL without authentication. + + Args: + gs_uri: The GS URI in the format gs://bucket-name/object-name. + + Returns: + The corresponding HTTPS URL; raises ValueError if the GS URI is invalid. 
+ """ + + if not gs_uri.startswith("gs://"): + raise ValueError("Invalid GS URI format") + + gs_uri = gs_uri[5:] + + # Extract bucket and object names, then URL encode the object name + bucket_name, object_name = gs_uri.split("/", 1) + object_name = quote(object_name) + + # Construct the HTTPS URL + https_url = f"https://storage.mtls.cloud.google.com/{bucket_name}/{object_name}" + return https_url + + +def upload_files_to_gcs(st: Any, bucket_name: str, files_to_upload: List[Any]) -> None: + """Upload multiple files to Google Cloud Storage and store URIs in session state.""" + bucket_name = bucket_name.replace("gs://", "") + uploaded_uris = [] + for file in files_to_upload: + if file: + file_bytes = file.read() + gcs_uri = upload_bytes_to_gcs( + bucket_name=bucket_name, + blob_name=file.name, + file_bytes=file_bytes, + content_type=file.type, + ) + uploaded_uris.append(gcs_uri) + st.session_state.uploader_key += 1 + st.session_state["gcs_uris_to_be_sent"] = ",".join(uploaded_uris) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/stream_handler.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/stream_handler.py new file mode 100644 index 0000000000..c173fec474 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/stream_handler.py @@ -0,0 +1,267 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=W0621,W0613,W3101,E0611 + +import json +from typing import Any, Dict, Generator, List, Optional +from urllib.parse import urljoin + +import google.auth +from google.auth.exceptions import DefaultCredentialsError +import google.auth.transport.requests +import google.oauth2.id_token +from langchain_core.messages import AIMessage, ToolMessage +import requests +import streamlit as st +from utils.multimodal_utils import format_content + + +@st.cache_resource() +class Client: + """A client for streaming events from a server.""" + + def __init__(self, url: str, authenticate_request: bool = False) -> None: + """Initialize the Client with a base URL.""" + self.url = urljoin(url, "stream_events") + self.authenticate_request = authenticate_request + self.creds, _ = google.auth.default() + + if self.authenticate_request: + self.id_token = self.get_id_token(self.url) + + def get_id_token(self, url: str) -> str: + """ + Retrieves an ID token, attempting to use a service-to-service method first and + otherwise using user default credentials. + See more on Cloud Run authentication at this link: + https://cloud.google.com/run/docs/authenticating/service-to-service + Args: + url: The URL to use for the token request. 
+ """ + + auth_req = google.auth.transport.requests.Request() + try: + token = google.oauth2.id_token.fetch_id_token(auth_req, url) + except DefaultCredentialsError: + self.creds.refresh(auth_req) + token = self.creds.id_token + return token + + def log_feedback(self, feedback_dict: Dict[str, Any], run_id: str) -> None: + """Log user feedback for a specific run.""" + score = feedback_dict["score"] + if score == "😞": + score = 0.0 + elif score == "🙁": + score = 0.25 + elif score == "😐": + score = 0.5 + elif score == "🙂": + score = 0.75 + elif score == "😀": + score = 1.0 + feedback_dict["score"] = score + feedback_dict["run_id"] = run_id + feedback_dict["log_type"] = "feedback" + feedback_dict.pop("type") + url = urljoin(self.url, "feedback") + headers = { + "Content-Type": "application/json", + } + if self.authenticate_request: + headers["Authorization"] = f"Bearer {self.id_token}" + requests.post(url, data=json.dumps(feedback_dict), headers=headers) + + def stream_events( + self, data: Dict[str, Any] + ) -> Generator[Dict[str, Any], None, None]: + """Stream events from the server, yielding parsed event data.""" + headers = {"Content-Type": "application/json", "Accept": "text/event-stream"} + if self.authenticate_request: + headers["Authorization"] = f"Bearer {self.id_token}" + with requests.post( + self.url, json={"input": data}, headers=headers, stream=True + ) as response: + for line in response.iter_lines(): + if line: + try: + event = json.loads(line.decode("utf-8")) + yield event + except json.JSONDecodeError: + print(f"Failed to parse event: {line.decode('utf-8')}") + + +class StreamHandler: + """Handles streaming updates to a Streamlit interface.""" + + def __init__(self, st: Any, initial_text: str = "") -> None: + """Initialize the StreamHandler with Streamlit context and initial text.""" + self.st = st + self.tool_expander = st.expander("Tool Calls:", expanded=False) + self.container = st.empty() + self.text = initial_text + self.tools_logs = initial_text 
+ + def new_token(self, token: str) -> None: + """Add a new token to the main text display.""" + self.text += token + self.container.markdown(format_content(self.text), unsafe_allow_html=True) + + def new_status(self, status_update: str) -> None: + """Add a new status update to the tool calls expander.""" + self.tools_logs += status_update + self.tool_expander.markdown(status_update) + + +class EventProcessor: + """Processes events from the stream and updates the UI accordingly.""" + + def __init__(self, st: Any, client: Client, stream_handler: StreamHandler) -> None: + """Initialize the EventProcessor with Streamlit context, client, and stream handler.""" + self.st = st + self.client = client + self.stream_handler = stream_handler + self.final_content = "" + self.tool_calls: List[Dict[str, Any]] = [] + self.additional_kwargs: Dict[str, Any] = {} + self.current_run_id: Optional[str] = None + + def process_events(self) -> None: + """Process events from the stream, handling each event type appropriately.""" + messages = self.st.session_state.user_chats[ + self.st.session_state["session_id"] + ]["messages"] + stream = self.client.stream_events( + data={ + "messages": messages, + "user_id": self.st.session_state["user_id"], + "session_id": self.st.session_state["session_id"], + } + ) + + event_handlers = { + "metadata": self.handle_metadata, + "end": self.handle_end, + "on_tool_start": self.handle_tool_start, + "on_retriever_start": self.handle_tool_start, + "on_tool_end": self.handle_tool_and_retriever_end, + "on_retriever_end": self.handle_tool_and_retriever_end, + "on_chat_model_stream": self.handle_chat_model_stream, + } + + for event in stream: + event_type = str(event.get("event")) + handler = event_handlers.get(event_type) + if handler: + handler(event) + + def handle_metadata(self, event: Dict[str, Any]) -> None: + """Handle metadata events.""" + self.current_run_id = event["data"].get("run_id") + + def handle_tool_start(self, event: Dict[str, Any]) -> None: + 
"""Handle the start of a tool or retriever execution.""" + msg = ( + f"\n\nCalling tool: `{event['name']}` with args: `{event['data']['input']}`" + ) + self.stream_handler.new_status(msg) + + def handle_tool_and_retriever_end(self, event: Dict[str, Any]) -> None: + """Handle the end of a tool execution.""" + data = event["data"] + + # support for on_tool_end event + if isinstance(data["output"], dict): + tool_id = data["output"].get("tool_call_id") + tool_name = data["output"].get("name") + tool_output = data["output"] + + # support for on_retriever_end event + elif isinstance(data["output"], list): + tool_id = event.get("id", "retriever") + tool_name = event.get("name", event["event"]) + tool_output = {"tool_call_id": tool_name, "content": data["output"]} + else: + raise ValueError( + f"Unexpected data type for tool output: {type(data['output'])}" + ) + tool_input = data["input"] + tool_call_input = AIMessage( + content="", + tool_calls=[{"id": tool_id, "name": tool_name, "args": tool_input}], + ) + tool_call_output = ToolMessage(**tool_output) + self.tool_calls.append(tool_call_input.model_dump()) + self.tool_calls.append(tool_call_output.model_dump()) + msg = ( + f"\n\nEnding tool: `{tool_name}` with\n **args:**\n" + f"```\n{json.dumps(tool_input, indent=2)}\n```\n" + f"\n\n**output:**\n " + f"```\n{json.dumps(tool_output, indent=2)}\n```" + ) + self.stream_handler.new_status(msg) + + def handle_chat_model_stream(self, event: Dict[str, Any]) -> None: + """Handle incoming tokens from the chat model stream.""" + data = event["data"] + content = data["chunk"]["content"] + self.additional_kwargs = { + **self.additional_kwargs, + **data["chunk"]["additional_kwargs"], + } + if content and len(content.strip()) > 0: + self.final_content += content + self.stream_handler.new_token(content) + + def handle_end(self, event: Dict[str, Any]) -> None: + """Handle the end of the event stream and finalize the response.""" + final_message = AIMessage( + content=self.final_content, 
+ id=self.current_run_id, + additional_kwargs=self.additional_kwargs, + ).model_dump() + session = self.st.session_state["session_id"] + self.st.session_state.user_chats[session]["messages"] = ( + self.st.session_state.user_chats[session]["messages"] + self.tool_calls + ) + self.st.session_state.user_chats[session]["messages"].append(final_message) + self.st.session_state.run_id = self.current_run_id + + +def get_chain_response(st: Any, client: Client, stream_handler: StreamHandler) -> None: + """Process the chain response update the Streamlit UI. + + This function initiates the event processing for a chain of operations, + involving an AI model's response generation and potential tool calls. + It creates an EventProcessor instance and starts the event processing loop. + + Args: + st (Any): The Streamlit app instance, used for accessing session state + and updating the UI. + client (Client): An instance of the Client class used to stream events + from the server. + stream_handler (StreamHandler): An instance of the StreamHandler class + used to update the Streamlit UI with + streaming content. + + Returns: + None + + Side effects: + - Updates the Streamlit UI with streaming tokens and tool call information. + - Modifies the session state to include the final AI message and run ID. + - Handles various events like chain starts/ends, tool calls, and model outputs. + """ + processor = EventProcessor(st, client, stream_handler) + processor.process_events() diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/title_summary.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/title_summary.py new file mode 100644 index 0000000000..1af15bf7b6 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/streamlit/utils/title_summary.py @@ -0,0 +1,68 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# fmt: off +# ruff: noqa: E501 +# flake8: noqa: W291 + + +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_google_vertexai import ChatVertexAI + +llm = ChatVertexAI(model_name="gemini-1.5-flash-001", temperature=0) + +title_template = ChatPromptTemplate.from_messages( + [("system", """Given a list of messages between a human and AI, come up with a short and relevant title for the conversation. Use up to 10 words. The title needs to be concise. +Examples: +**Input:** +``` +Human: hi, what is the best italian dish? +AI: That's a tough one! Italy has so many amazing dishes, it's hard to pick just one "best." To help me give you a great suggestion, tell me a little more about what you're looking for. +``` +**Output:** Best italian dish + +**Input:** + +``` +Human: How to fix a broken laptop screen? +AI: Fixing a broken laptop screen can be tricky and often requires professional help. However, there are a few things you can try at home before resorting to a repair shop. +``` + +**Output:** Fixing a broken laptop screen + +**Input:** + +``` +Human: Can you write me a poem about the beach? +AI: As the sun dips down below the horizon +And the waves gently kiss the shore, +I sit here and watch the ocean +And feel its power evermore. +``` + +**Output:** Poem about the beach + +**Input:** + +``` +Human: What's the best way to learn to code? +AI: There are many ways to learn to code, and the best method for you will depend on your learning style and goals. 
+``` + +**Output:** How to learn to code +"""), + + MessagesPlaceholder(variable_name="messages"), + ]) + +chain_title = title_template | llm diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_langgraph_dummy_agent.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_langgraph_dummy_agent.py new file mode 100644 index 0000000000..750363dbea --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_langgraph_dummy_agent.py @@ -0,0 +1,62 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=R0801 + +import logging + +from app.patterns.langgraph_dummy_agent.chain import chain +from app.utils.output_types import OnToolEndEvent +from langchain_core.messages import AIMessageChunk, HumanMessage +import pytest + +CHAIN_NAME = "LangGraph agent" + + +@pytest.mark.asyncio +async def test_langgraph_chain_astream_events() -> None: + """ + Integration testing example for the default LangGraph agent chain. We assert that the chain returns + events, containing AIMessageChunks. 
+ """ + user_message = HumanMessage(f"Test message for {CHAIN_NAME} chain") + input_dict = {"messages": [user_message]} + + events = [event async for event in chain.astream_events(input_dict, version="v2")] + + assert len(events) > 1, ( + f"Expected multiple events for {CHAIN_NAME} chain, " f"got {len(events)}" + ) + on_tool_end_events = [event for event in events if event["event"] == "on_tool_end"] + assert len(on_tool_end_events) == 1, ( + f"Expected exactly one on_tool_end event for {CHAIN_NAME} chain, " + f"got {len(on_tool_end_events)}" + ) + assert isinstance( + OnToolEndEvent.model_validate(on_tool_end_events[0]), OnToolEndEvent + ) + + on_chain_stream_events = [ + event for event in events if event["event"] == "on_chat_model_stream" + ] + + assert on_chain_stream_events, ( + f"Expected at least one on_chat_model_stream event" f" for {CHAIN_NAME} chain" + ) + + for event in on_chain_stream_events: + assert AIMessageChunk.model_validate( + event["data"]["chunk"] + ), f"Invalid AIMessageChunk for {CHAIN_NAME} chain: {event['data']['chunk']}" + + logging.info(f"All assertions passed for {CHAIN_NAME} chain") diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_rag_qa.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_rag_qa.py new file mode 100644 index 0000000000..831ac5c579 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/patterns/test_rag_qa.py @@ -0,0 +1,63 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=R0801 + +import logging + +from app.patterns.custom_rag_qa.chain import chain +from app.utils.output_types import OnToolEndEvent +from langchain_core.messages import AIMessageChunk, HumanMessage +import pytest + +CHAIN_NAME = "Rag QA" + + +@pytest.mark.asyncio +async def test_rag_chain_astream_events() -> None: + """ + Integration testing example for the default RAG QA chain. We assert that the chain returns events, + containing AIMessageChunks. + """ + user_message = HumanMessage(f"Test message for {CHAIN_NAME} chain") + input_dict = {"messages": [user_message]} + + events = [event async for event in chain.astream_events(input_dict, version="v2")] + + assert len(events) > 1, ( + f"Expected multiple events for {CHAIN_NAME} chain, " f"got {len(events)}" + ) + + on_tool_end_events = [event for event in events if event["event"] == "on_tool_end"] + assert len(on_tool_end_events) == 1, ( + f"Expected exactly one on_tool_end event for {CHAIN_NAME} chain, " + f"got {len(on_tool_end_events)}" + ) + assert isinstance( + OnToolEndEvent.model_validate(on_tool_end_events[0]), OnToolEndEvent + ) + + on_chain_stream_events = [ + event for event in events if event["event"] == "on_chat_model_stream" + ] + + assert on_chain_stream_events, ( + f"Expected at least one on_chat_model_stream event" f" for {CHAIN_NAME} chain" + ) + + for event in on_chain_stream_events: + assert AIMessageChunk.model_validate( + event["data"]["chunk"] + ), f"Invalid AIMessageChunk for {CHAIN_NAME} chain: {event['data']['chunk']}" + + logging.info(f"All assertions passed for {CHAIN_NAME} chain") diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_chain.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_chain.py new file mode 100644 index 0000000000..b311d4480b --- /dev/null +++ 
b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_chain.py @@ -0,0 +1,53 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=R0801 + +import logging + +from app.chain import chain +from langchain_core.messages import AIMessageChunk, HumanMessage +import pytest + +CHAIN_NAME = "Default" + + +@pytest.mark.asyncio +async def test_default_chain_astream_events() -> None: + """ + Integration testing example for the default dummy chain. We assert that the chain returns events, + containing AIMessageChunks. 
+ """ + user_message = HumanMessage(f"Test message for {CHAIN_NAME} chain") + input_dict = {"messages": [user_message]} + + events = [event async for event in chain.astream_events(input_dict, version="v2")] + + assert len(events) > 1, ( + f"Expected multiple events for {CHAIN_NAME} chain, " f"got {len(events)}" + ) + + on_chain_stream_events = [ + event for event in events if event["event"] == "on_chat_model_stream" + ] + + assert on_chain_stream_events, ( + f"Expected at least one on_chat_model_stream event" f" for {CHAIN_NAME} chain" + ) + + for event in on_chain_stream_events: + assert AIMessageChunk.model_validate( + event["data"]["chunk"] + ), f"Invalid AIMessageChunk for {CHAIN_NAME} chain: {event['data']['chunk']}" + + logging.info(f"All assertions passed for {CHAIN_NAME} chain") diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_server_e2e.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_server_e2e.py new file mode 100644 index 0000000000..7a2af95762 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/integration/test_server_e2e.py @@ -0,0 +1,177 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=W0621, W0613, R0801, R1732 + +import json +import logging +import subprocess +import sys +import threading +import time +from typing import Any, Iterator +import uuid + +import pytest +import requests +from requests.exceptions import RequestException + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +BASE_URL = "http://127.0.0.1:8000/" +STREAM_EVENTS_URL = BASE_URL + "stream_events" +FEEDBACK_URL = BASE_URL + "feedback" + +HEADERS = {"Content-Type": "application/json"} + + +def log_output(pipe: Any, log_func: Any) -> None: + """Log the output from the given pipe.""" + for line in iter(pipe.readline, ""): + log_func(line.strip()) + + +def start_server() -> subprocess.Popen[str]: + """Start the FastAPI server using subprocess and log its output.""" + command = [ + sys.executable, + "-m", + "uvicorn", + "app.server:app", + "--host", + "0.0.0.0", + "--port", + "8000", + ] + process = subprocess.Popen( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1 + ) + + # Start threads to log stdout and stderr in real-time + threading.Thread( + target=log_output, args=(process.stdout, logger.info), daemon=True + ).start() + threading.Thread( + target=log_output, args=(process.stderr, logger.error), daemon=True + ).start() + + return process + + +def wait_for_server(timeout: int = 60, interval: int = 1) -> bool: + """Wait for the server to be ready.""" + start_time = time.time() + while time.time() - start_time < timeout: + try: + response = requests.get("http://127.0.0.1:8000/docs", timeout=10) + if response.status_code == 200: + logger.info("Server is ready") + return True + except RequestException: + pass + time.sleep(interval) + logger.error(f"Server did not become ready within {timeout} seconds") + return False + + +@pytest.fixture(scope="session") +def server_fixture(request: Any) -> Iterator[subprocess.Popen[str]]: + """Pytest fixture to start and stop the server for 
testing.""" + logger.info("Starting server process") + server_process = start_server() + if not wait_for_server(): + pytest.fail("Server failed to start") + logger.info("Server process started") + + def stop_server() -> None: + logger.info("Stopping server process") + server_process.terminate() + server_process.wait() + logger.info("Server process stopped") + + request.addfinalizer(stop_server) + yield server_process + + +def test_chat_stream(server_fixture: subprocess.Popen[str]) -> None: + """Test the chat stream functionality.""" + logger.info("Starting chat stream test") + + data = { + "input": { + "messages": [ + {"type": "human", "content": "Hello, AI!"}, + {"type": "ai", "content": "Hello!"}, + {"type": "human", "content": "What cooking recipes do you suggest?"}, + ], + "user_id": "test-user", + "session_id": "test-session", + } + } + + response = requests.post( + STREAM_EVENTS_URL, headers=HEADERS, json=data, stream=True, timeout=10 + ) + assert response.status_code == 200 + + events = [json.loads(line) for line in response.iter_lines() if line] + logger.info(f"Received {len(events)} events") + + assert len(events) > 2, f"Expected more than 2 events, got {len(events)}." 
+ assert events[0]["event"] == "metadata", ( + f"First event should be 'metadata', " f"got {events[0]['event']}" + ) + assert "run_id" in events[0]["data"], "Missing 'run_id' in metadata" + + event_types = [event["event"] for event in events] + assert "on_chat_model_stream" in event_types, "Missing 'on_chat_model_stream' event" + assert events[-1]["event"] == "end", ( + f"Last event should be 'end', " f"got {events[-1]['event']}" + ) + + logger.info("Test completed successfully") + + +def test_chat_stream_error_handling(server_fixture: subprocess.Popen[str]) -> None: + """Test the chat stream error handling.""" + logger.info("Starting chat stream error handling test") + + data = {"input": [{"type": "invalid_type", "content": "Cause an error"}]} + response = requests.post( + STREAM_EVENTS_URL, headers=HEADERS, json=data, stream=True, timeout=10 + ) + + assert response.status_code == 422, ( + f"Expected status code 422, " f"got {response.status_code}" + ) + logger.info("Error handling test completed successfully") + + +def test_collect_feedback(server_fixture: subprocess.Popen[str]) -> None: + """ + Test the feedback collection endpoint (/feedback) to ensure it properly + logs the received feedback. 
+ """ + # Create sample feedback data + feedback_data = { + "score": 4, + "run_id": str(uuid.uuid4()), + "text": "Great response!", + } + + response = requests.post( + FEEDBACK_URL, json=feedback_data, headers=HEADERS, timeout=10 + ) + assert response.status_code == 200 diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/.results/.placeholder b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/.results/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/README.md b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/README.md new file mode 100644 index 0000000000..23fda0224a --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/README.md @@ -0,0 +1,79 @@ +# Robust Load Testing for Generative AI Applications + +This directory provides a comprehensive load testing framework for your Generative AI application, leveraging the power of [Locust](http://locust.io), a leading open-source load testing tool. + +## Local Load Testing + +Follow these steps to execute load tests on your local machine: + +**1. Start the FastAPI Server:** + +Launch the FastAPI server in a separate terminal: + +```bash +poetry run uvicorn app.server:app --host 0.0.0.0 --port 8000 --reload +``` + +**2. (In another tab) Create virtual environment with Locust** +Using another terminal tab is suggested to avoid conflicts with the existing application Python environment. + +```commandline +python3 -m venv locust_env && source locust_env/bin/activate && pip install locust==2.31.1 +``` + +**3. 
Execute the Load Test:** +Trigger the Locust load test with the following command: + +```bash +locust -f tests/load_test/load_test.py \ +-H http://127.0.0.1:8000 \ +--headless \ +-t 30s -u 60 -r 2 \ +--csv=tests/load_test/.results/results \ +--html=tests/load_test/.results/report.html +``` + +This command initiates a 30-second load test, simulating 2 users spawning per second, reaching a maximum of 60 concurrent users. + +**Results:** + +Comprehensive CSV and HTML reports detailing the load test performance will be generated and saved in the `tests/load_test/.results` directory. + +## Remote Load Testing (Targeting Cloud Run) + +This framework also supports load testing against remote targets, such as a staging Cloud Run instance. This process is seamlessly integrated into the Continuous Delivery pipeline via Cloud Build, as defined in the [pipeline file](cicd/cd/staging.yaml). + +**Prerequisites:** + +- **Dependencies:** Ensure your environment has the same dependencies required for local testing. +- **Cloud Run Invoker Role:** You'll need the `roles/run.invoker` role to invoke the Cloud Run service. + +**Steps:** + +**1. Obtain Cloud Run Service URL:** + +Navigate to the Cloud Run console, select your service, and copy the URL displayed at the top. Set this URL as an environment variable: + +```bash +export RUN_SERVICE_URL=https://your-cloud-run-service-url.run.app +``` + +**2. Obtain ID Token:** + +Retrieve the ID token required for authentication: + +```bash +export _ID_TOKEN=$(gcloud auth print-identity-token -q) +``` + +**3. Execute the Load Test:** +The following command executes the same load test parameters as the local test but targets your remote Cloud Run instance. 
+ +```bash +poetry run locust -f tests/load_test/load_test.py \ +-H $RUN_SERVICE_URL \ +--headless \ +-t 30s -u 60 -r 2 \ +--csv=tests/load_test/.results/results \ +--html=tests/load_test/.results/report.html +``` diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/load_test.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/load_test.py new file mode 100644 index 0000000000..5a4cd5ab8a --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/load_test/load_test.py @@ -0,0 +1,85 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# pylint: disable=R0801 + +import json +import os +import time + +from locust import HttpUser, between, task + + +class ChatStreamUser(HttpUser): + """Simulates a user interacting with the chat stream API.""" + + wait_time = between(1, 3) # Wait 1-3 seconds between tasks + + @task + def chat_stream(self) -> None: + """Simulates a chat stream interaction.""" + headers = {"Content-Type": "application/json"} + if os.environ.get("_ID_TOKEN"): + headers["Authorization"] = f'Bearer {os.environ["_ID_TOKEN"]}' + + data = { + "input": { + "messages": [ + {"type": "human", "content": "Hello, AI!"}, + {"type": "ai", "content": "Hello!"}, + {"type": "human", "content": "Who are you?"}, + ], + "user_id": "test-user", + "session_id": "test-session", + } + } + + start_time = time.time() + + with self.client.post( + "/stream_events", + headers=headers, + json=data, + catch_response=True, + name="/stream_events first event", + stream=True, + ) as response: + if response.status_code == 200: + events = [] + for line in response.iter_lines(): + if line: + events.append(json.loads(line)) + if events[-1]["event"] == "end": + break + + end_time = time.time() + total_time = end_time - start_time + + if ( + len(events) > 2 + and events[0]["event"] == "metadata" + and events[-1]["event"] == "end" + ): + response.success() + self.environment.events.request.fire( + request_type="POST", + name="/stream_events end", + response_time=total_time * 1000, # Convert to milliseconds + response_length=len(json.dumps(events)), + response=response, + context={}, + ) + else: + response.failure("Unexpected response structure") + else: + response.failure(f"Unexpected status code: {response.status_code}") diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_server.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_server.py new file mode 100644 index 0000000000..c289f59379 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_server.py @@ 
-0,0 +1,148 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=W0707, C0415 + +import json +import logging +import os +from typing import Any, Generator +from unittest.mock import MagicMock, patch + +from app.utils.input_types import InputChat +from google.auth.credentials import Credentials +from httpx import AsyncClient +from langchain_core.messages import HumanMessage +import pytest + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +@pytest.fixture(autouse=True) +def mock_google_cloud_credentials() -> Generator[None, None, None]: + """Mock Google Cloud credentials for testing.""" + with patch.dict( + os.environ, + { + "GOOGLE_APPLICATION_CREDENTIALS": "/path/to/mock/credentials.json", + "GOOGLE_CLOUD_PROJECT_ID": "mock-project-id", + }, + ): + yield + + +@pytest.fixture(autouse=True) +def mock_google_auth_default() -> Generator[None, None, None]: + """Mock the google.auth.default function for testing.""" + mock_credentials = MagicMock(spec=Credentials) + mock_project = "mock-project-id" + + with patch("google.auth.default", return_value=(mock_credentials, mock_project)): + yield + + +@pytest.fixture +def sample_input_chat() -> InputChat: + """ + Fixture to create a sample input chat for testing. 
+ """ + return InputChat( + user_id="test-user", + session_id="test-session", + messages=[HumanMessage(content="What is the meaning of life?")], + ) + + +class AsyncIterator: + """ + A helper class to create asynchronous iterators for testing. + """ + + def __init__(self, seq: list) -> None: + self.iter = iter(seq) + + def __aiter__(self) -> "AsyncIterator": + return self + + async def __anext__(self) -> Any: + try: + return next(self.iter) + except StopIteration: + raise StopAsyncIteration + + +def test_redirect_root_to_docs() -> None: + """ + Test that the root endpoint (/) redirects to the Swagger UI documentation. + """ + from app.server import app + from fastapi.testclient import TestClient + + client = TestClient(app) + response = client.get("/") + assert response.status_code == 200 + assert "Swagger UI" in response.text + + +@pytest.mark.asyncio +async def test_stream_chat_events() -> None: + """ + Test the stream_events endpoint to ensure it correctly handles + streaming responses and generates the expected events. 
+ """ + from app.server import app + + input_data = { + "input": { + "user_id": "test-user", + "session_id": "test-session", + "messages": [ + {"type": "human", "content": "Hello, AI!"}, + {"type": "ai", "content": "Hello!"}, + {"type": "human", "content": "What cooking recipes do you suggest?"}, + ], + } + } + + mock_uuid = "12345678-1234-5678-1234-567812345678" + mock_events = [ + {"event": "on_chat_model_stream", "data": {"content": "Mocked response"}}, + {"event": "on_chat_model_stream", "data": {"content": "Additional response"}}, + ] + + with patch("app.server.chain") as mock_chain: + mock_chain.astream_events.return_value = AsyncIterator(mock_events) + + with patch("uuid.uuid4", return_value=mock_uuid), patch( + "app.server.Traceloop.set_association_properties" + ): + async with AsyncClient(app=app, base_url="http://test") as ac: + response = await ac.post("/stream_events", json=input_data) + + assert response.status_code == 200 + assert response.headers["content-type"] == "text/event-stream; charset=utf-8" + + events = [] + for event in response.iter_lines(): + events.append(json.loads(event)) + + assert len(events) == 4 + assert events[0]["event"] == "metadata" + assert events[0]["data"]["run_id"] == str(mock_uuid) + assert events[1]["event"] == "on_chat_model_stream" + assert events[1]["data"]["content"] == "Mocked response" + assert events[2]["event"] == "on_chat_model_stream" + assert events[2]["data"]["content"] == "Additional response" + assert events[3]["event"] == "end" diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_utils/test_tracing_exporter.py b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_utils/test_tracing_exporter.py new file mode 100644 index 0000000000..f77d1d7a05 --- /dev/null +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/tests/unit/test_utils/test_tracing_exporter.py @@ -0,0 +1,143 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=W0621, W0613, W0212 + +from typing import Any, Generator +from unittest.mock import Mock, patch + +from app.utils.tracing import CloudTraceLoggingSpanExporter +from google.cloud import logging as google_cloud_logging +from google.cloud import storage +from opentelemetry.sdk.trace import ReadableSpan +import pytest + + +@pytest.fixture +def mock_logging_client() -> Mock: + """Create a mock logging client.""" + return Mock(spec=google_cloud_logging.Client) + + +@pytest.fixture +def mock_storage_client() -> Mock: + """Create a mock storage client.""" + return Mock(spec=storage.Client) + + +@pytest.fixture +def mock_credentials() -> Any: + """Create mock credentials.""" + return Mock() + + +@pytest.fixture +def patch_auth(mock_credentials: Any) -> Generator[Mock, None, None]: + """Patch the google.auth.default function.""" + with patch( + "google.auth.default", return_value=(mock_credentials, "project") + ) as mock_auth: + yield mock_auth + + +@pytest.fixture +def patch_clients( + mock_logging_client: Mock, mock_storage_client: Mock +) -> Generator[None, None, None]: + """Patch the logging and storage clients.""" + with patch("google.cloud.logging.Client", return_value=mock_logging_client): + with patch("google.cloud.storage.Client", return_value=mock_storage_client): + yield + + +@pytest.fixture +def exporter( + mock_logging_client: Mock, + mock_storage_client: Mock, + patch_auth: Any, + mock_credentials: Any, + patch_clients: Any, +) -> CloudTraceLoggingSpanExporter: + 
"""Create a CloudTraceLoggingSpanExporter instance for testing.""" + exporter = CloudTraceLoggingSpanExporter( + project_id="test-project", + logging_client=mock_logging_client, + storage_client=mock_storage_client, + bucket_name="test-bucket", + ) + exporter._ensure_bucket_exists = Mock() # type: ignore[method-assign] + return exporter + + +def test_init(exporter: CloudTraceLoggingSpanExporter) -> None: + """Test the initialization of CloudTraceLoggingSpanExporter.""" + assert exporter.project_id == "test-project" + assert exporter.bucket_name == "test-bucket" + assert exporter.debug is False + + +def test_store_in_gcs(exporter: CloudTraceLoggingSpanExporter) -> None: + """Test the store_in_gcs method of CloudTraceLoggingSpanExporter.""" + span_id = "test-span-id" + content = "test-content" + uri = exporter.store_in_gcs(content, span_id) + assert uri == f"gs://test-bucket/spans/{span_id}.json" + exporter.bucket.blob.assert_called_once_with(f"spans/{span_id}.json") + + +@patch("json.dumps") +def test_process_large_attributes_small_payload( + mock_json_dumps: Mock, exporter: CloudTraceLoggingSpanExporter +) -> None: + """Test processing of small payload attributes.""" + mock_json_dumps.return_value = "a" * 100 # Small payload + span_dict = {"attributes": {"key": "value"}} + result = exporter._process_large_attributes(span_dict, "span-id") + assert result == span_dict + + +@patch("json.dumps") +def test_process_large_attributes_large_payload( + mock_json_dumps: Mock, exporter: CloudTraceLoggingSpanExporter +) -> None: + """Test processing of large payload attributes.""" + mock_json_dumps.return_value = "a" * (400 * 1024 + 1) # Large payload + span_dict = { + "attributes": { + "key1": "value1", + "traceloop.association.properties.key2": "value2", + } + } + result = exporter._process_large_attributes(span_dict, "span-id") + assert "uri_payload" in result["attributes"] + assert "url_payload" in result["attributes"] + assert "key1" not in result["attributes"] + assert 
"traceloop.association.properties.key2" in result["attributes"] + + +@patch.object(CloudTraceLoggingSpanExporter, "_process_large_attributes") +def test_export( + mock_process_large_attributes: Mock, exporter: CloudTraceLoggingSpanExporter +) -> None: + """Test the export method of CloudTraceLoggingSpanExporter.""" + mock_span = Mock(spec=ReadableSpan) + mock_span.get_span_context.return_value.trace_id = 123 + mock_span.get_span_context.return_value.span_id = 456 + mock_span.to_json.return_value = '{"key": "value"}' + + mock_process_large_attributes.return_value = {"processed": "data"} + + exporter.export([mock_span]) + + mock_process_large_attributes.assert_called_once() + exporter.logger.log_struct.assert_called_once() From 43bd792d7e9423dbd742c0f413099c8c750e48f9 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 11 Oct 2024 19:54:27 +0200 Subject: [PATCH 64/76] chore(deps): update actions/setup-python action to v5 (#1244) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [actions/setup-python](https://redirect.github.com/actions/setup-python) | action | major | `v4` -> `v5` | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
    actions/setup-python (actions/setup-python) ### [`v5`](https://redirect.github.com/actions/setup-python/compare/v4...v5) [Compare Source](https://redirect.github.com/actions/setup-python/compare/v4...v5)
    --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). --- .../workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml b/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml index 419c68f22a..fd1e5df556 100644 --- a/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml +++ b/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install Poetry and dependencies From 9c71d7b36fa107689cef45bc22ca50660a1e88c1 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 11 Oct 2024 19:54:42 +0200 Subject: [PATCH 65/76] chore(deps): update actions/checkout action to v4 (#1243) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [actions/checkout](https://redirect.github.com/actions/checkout) | action | major | `v3` -> `v4` | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
    actions/checkout (actions/checkout) ### [`v4`](https://redirect.github.com/actions/checkout/blob/HEAD/CHANGELOG.md#v417) [Compare Source](https://redirect.github.com/actions/checkout/compare/v3...v4) - Bump the minor-npm-dependencies group across 1 directory with 4 updates by [@​dependabot](https://redirect.github.com/dependabot) in [https://github.com/actions/checkout/pull/1739](https://redirect.github.com/actions/checkout/pull/1739) - Bump actions/checkout from 3 to 4 by [@​dependabot](https://redirect.github.com/dependabot) in [https://github.com/actions/checkout/pull/1697](https://redirect.github.com/actions/checkout/pull/1697) - Check out other refs/\* by commit by [@​orhantoy](https://redirect.github.com/orhantoy) in [https://github.com/actions/checkout/pull/1774](https://redirect.github.com/actions/checkout/pull/1774) - Pin actions/checkout's own workflows to a known, good, stable version. by [@​jww3](https://redirect.github.com/jww3) in [https://github.com/actions/checkout/pull/1776](https://redirect.github.com/actions/checkout/pull/1776)
    --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). --- .../workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml b/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml index fd1e5df556..0fa511479d 100644 --- a/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml +++ b/.github/workflows/_e2e_gen_ai_app_starter_kit__lint_and_test.yaml @@ -15,7 +15,7 @@ jobs: run: working-directory: gemini/sample-apps/e2e-gen-ai-app-starter-pack steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: From f5a51b3d0eb94660c99329aa39968312060e33b3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 13:13:04 -0500 Subject: [PATCH 66/76] chore(deps): bump cookie and express in /gemini/sample-apps/genwealth/api (#1242) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [cookie](https://github.com/jshttp/cookie) to 0.7.1 and updates ancestor dependency [express](https://github.com/expressjs/express). These dependencies need to be updated together. Updates `cookie` from 0.6.0 to 0.7.1
    Release notes

    Sourced from cookie's releases.

    0.7.1

    Fixed

    • Allow leading dot for domain (#174)
      • Although not permitted in the spec, some users expect this to work and user agents ignore the leading dot according to spec
    • Add fast path for serialize without options, use obj.hasOwnProperty when parsing (#172)

    https://github.com/jshttp/cookie/compare/v0.7.0...v0.7.1

    0.7.0

    https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.0

    Commits
    Maintainer changes

    This version was pushed to npm by blakeembrey, a new releaser for cookie since your current version.


    Updates `express` from 4.19.2 to 4.21.1
    Release notes

    Sourced from express's releases.

    4.21.1

    What's Changed

    Full Changelog: https://github.com/expressjs/express/compare/4.21.0...4.21.1

    4.21.0

    What's Changed

    New Contributors

    Full Changelog: https://github.com/expressjs/express/compare/4.20.0...4.21.0

    4.20.0

    What's Changed

    Important

    • IMPORTANT: The default depth level for parsing URL-encoded data is now 32 (previously was Infinity)
    • Remove link renderization in html while using res.redirect

    Other Changes

    ... (truncated)

    Changelog

    Sourced from express's changelog.

    4.21.1 / 2024-10-08

    4.21.0 / 2024-09-11

    • Deprecate res.location("back") and res.redirect("back") magic string
    • deps: serve-static@1.16.2
      • includes send@0.19.0
    • deps: finalhandler@1.3.1
    • deps: qs@6.13.0

    4.20.0 / 2024-09-10

    • deps: serve-static@0.16.0
      • Remove link renderization in html while redirecting
    • deps: send@0.19.0
      • Remove link renderization in html while redirecting
    • deps: body-parser@0.6.0
      • add depth option to customize the depth level in the parser
      • IMPORTANT: The default depth level for parsing URL-encoded data is now 32 (previously was Infinity)
    • Remove link renderization in html while using res.redirect
    • deps: path-to-regexp@0.1.10
      • Adds support for named matching groups in the routes using a regex
      • Adds backtracking protection to parameters without regexes defined
    • deps: encodeurl@~2.0.0
      • Removes encoding of \, |, and ^ to align better with URL spec
    • Deprecate passing options.maxAge and options.expires to res.clearCookie
      • Will be ignored in v5, clearCookie will set a cookie with an expires in the past to instruct clients to delete the cookie
    Commits

    Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/GoogleCloudPlatform/generative-ai/network/alerts).
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../genwealth/api/package-lock.json | 110 ++++++++++-------- gemini/sample-apps/genwealth/api/package.json | 2 +- 2 files changed, 63 insertions(+), 49 deletions(-) diff --git a/gemini/sample-apps/genwealth/api/package-lock.json b/gemini/sample-apps/genwealth/api/package-lock.json index 6cc7460305..56ef847841 100644 --- a/gemini/sample-apps/genwealth/api/package-lock.json +++ b/gemini/sample-apps/genwealth/api/package-lock.json @@ -13,7 +13,7 @@ "@google-cloud/storage": "^7.9.0", "@google-cloud/vertexai": "^1.0.0", "cors": "^2.8.5", - "express": "^4.19.2", + "express": "^4.21.1", "lodash": "^4.17.21", "multer": "^1.4.5-lts.1", "pg": "^8.7.3", @@ -1517,9 +1517,9 @@ } }, "node_modules/body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -1529,7 +1529,7 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" @@ -1900,9 +1900,9 @@ } }, "node_modules/cookie": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", "engines": { 
"node": ">= 0.6" } @@ -2076,9 +2076,9 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", "engines": { "node": ">= 0.8" } @@ -2202,36 +2202,36 @@ "license": "Apache-2.0" }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.21.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", + "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.6.0", + "cookie": "0.7.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": 
"1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -2295,12 +2295,12 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -3280,9 +3280,12 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/methods": { "version": "1.1.2", @@ -3734,9 +3737,12 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -3965,9 +3971,9 
@@ } }, "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "node_modules/pg": { "version": "8.12.0", @@ -4205,11 +4211,11 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -4376,9 +4382,9 @@ } }, "node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -4398,20 +4404,28 @@ "node": ">= 0.8.0" } }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/send/node_modules/ms": { "version": 
"2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" diff --git a/gemini/sample-apps/genwealth/api/package.json b/gemini/sample-apps/genwealth/api/package.json index e348a5e71c..00f535fba8 100644 --- a/gemini/sample-apps/genwealth/api/package.json +++ b/gemini/sample-apps/genwealth/api/package.json @@ -15,7 +15,7 @@ "@google-cloud/storage": "^7.9.0", "@google-cloud/vertexai": "^1.0.0", "cors": "^2.8.5", - "express": "^4.19.2", + "express": "^4.21.1", "lodash": "^4.17.21", "multer": "^1.4.5-lts.1", "pg": "^8.7.3", From 3e305773f62d8dc99dec4335368c1ec6e086b22c Mon Sep 17 00:00:00 2001 From: Abhishek Bhagwat Date: Fri, 11 Oct 2024 11:18:35 -0700 Subject: [PATCH 67/76] feat: Add Grounded Generation Playground (#1197) # Vertex Grounded Generation Playground This demo showcases how to use Vertex AI [Grounded Generation API](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen) with a Next.js frontend. It provides a user-friendly interface for exploring both chat-based and side-by-side comparisons of grounded and ungrounded responses. 
This allows you to test different models and grounding sources, including [Google Search](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen#web-grounding) and [Vertex AI Search](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen#inline-vais). --------- Co-authored-by: Owl Bot Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> Co-authored-by: Holt Skinner --- .github/actions/spelling/allow.txt | 6 + rag-grounding/README.md | 273 +- .../.eslintrc.json | 11 + .../grounded-generation-playground/.gitignore | 36 + .../.prettierrc | 12 + .../.stylelintrc.json | 11 + .../grounded-generation-playground/README.md | 106 + .../grounded-generation-playground/app.yaml | 31 + .../components.json | 20 + .../next-env.d.ts | 5 + .../next.config.js | 23 + .../next.config.mjs | 20 + .../package-lock.json | 13945 ++++++++++++++++ .../package.json | 69 + .../postcss.config.mjs | 24 + .../src/app/api/chat/route.ts | 134 + .../src/app/api/grounded/route.ts | 121 + .../src/app/api/ungrounded/route.ts | 63 + .../src/app/favicon.ico | Bin 0 -> 25931 bytes .../src/app/fonts/GeistMonoVF.woff | Bin 0 -> 67864 bytes .../src/app/fonts/GeistVF.woff | Bin 0 -> 66268 bytes .../src/app/globals.css | 152 + .../src/app/layout.tsx | 34 + .../src/app/page.tsx | 709 + .../src/app/styles/page.tsx | 198 + .../src/components/ModelSelector.tsx | 51 + .../src/components/ui/about-page-content.tsx | 205 + .../src/components/ui/accordion.css | 85 + .../src/components/ui/accordion.tsx | 121 + .../src/components/ui/avatar.tsx | 66 + .../src/components/ui/button.tsx | 72 + .../src/components/ui/card.tsx | 94 + .../ui/example-question-greeting.tsx | 74 + .../src/components/ui/grounded-text-block.tsx | 128 + .../src/components/ui/grounding-option.tsx | 76 + .../src/components/ui/icons.tsx | 252 + .../src/components/ui/input.tsx | 40 + .../src/components/ui/label.tsx | 38 + .../src/components/ui/page-header.tsx | 53 + .../src/components/ui/page-nav-tabs.tsx 
| 54 + .../src/components/ui/page-sidebar.tsx | 147 + .../src/components/ui/scroll-area.tsx | 63 + .../src/components/ui/select.tsx | 173 + .../src/components/ui/slider.tsx | 41 + .../src/components/ui/switch.tsx | 45 + .../src/components/ui/tabs.tsx | 71 + .../src/components/ui/tooltip.tsx | 46 + .../src/lib/apiutils.ts | 223 + .../src/lib/grounded_content_citations.ts | 145 + .../src/lib/grounding_option_utils.ts | 57 + .../src/lib/grounding_options.ts | 39 + .../src/lib/utils.ts | 22 + .../tailwind.config.js | 92 + .../tailwind.config.ts | 35 + .../tsconfig.json | 27 + 55 files changed, 18502 insertions(+), 136 deletions(-) create mode 100644 search/grounded-generation-playground/.eslintrc.json create mode 100644 search/grounded-generation-playground/.gitignore create mode 100644 search/grounded-generation-playground/.prettierrc create mode 100644 search/grounded-generation-playground/.stylelintrc.json create mode 100644 search/grounded-generation-playground/README.md create mode 100644 search/grounded-generation-playground/app.yaml create mode 100644 search/grounded-generation-playground/components.json create mode 100644 search/grounded-generation-playground/next-env.d.ts create mode 100644 search/grounded-generation-playground/next.config.js create mode 100644 search/grounded-generation-playground/next.config.mjs create mode 100644 search/grounded-generation-playground/package-lock.json create mode 100644 search/grounded-generation-playground/package.json create mode 100644 search/grounded-generation-playground/postcss.config.mjs create mode 100644 search/grounded-generation-playground/src/app/api/chat/route.ts create mode 100644 search/grounded-generation-playground/src/app/api/grounded/route.ts create mode 100644 search/grounded-generation-playground/src/app/api/ungrounded/route.ts create mode 100644 search/grounded-generation-playground/src/app/favicon.ico create mode 100644 search/grounded-generation-playground/src/app/fonts/GeistMonoVF.woff create mode 
100644 search/grounded-generation-playground/src/app/fonts/GeistVF.woff create mode 100644 search/grounded-generation-playground/src/app/globals.css create mode 100644 search/grounded-generation-playground/src/app/layout.tsx create mode 100644 search/grounded-generation-playground/src/app/page.tsx create mode 100644 search/grounded-generation-playground/src/app/styles/page.tsx create mode 100644 search/grounded-generation-playground/src/components/ModelSelector.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/about-page-content.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/accordion.css create mode 100644 search/grounded-generation-playground/src/components/ui/accordion.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/avatar.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/button.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/card.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/example-question-greeting.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/grounded-text-block.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/grounding-option.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/icons.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/input.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/label.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/page-header.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/page-nav-tabs.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/page-sidebar.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/scroll-area.tsx create mode 100644 
search/grounded-generation-playground/src/components/ui/select.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/slider.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/switch.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/tabs.tsx create mode 100644 search/grounded-generation-playground/src/components/ui/tooltip.tsx create mode 100644 search/grounded-generation-playground/src/lib/apiutils.ts create mode 100644 search/grounded-generation-playground/src/lib/grounded_content_citations.ts create mode 100644 search/grounded-generation-playground/src/lib/grounding_option_utils.ts create mode 100644 search/grounded-generation-playground/src/lib/grounding_options.ts create mode 100644 search/grounded-generation-playground/src/lib/utils.ts create mode 100644 search/grounded-generation-playground/tailwind.config.js create mode 100644 search/grounded-generation-playground/tailwind.config.ts create mode 100644 search/grounded-generation-playground/tsconfig.json diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 4da058dced..563db4d2c0 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -94,6 +94,7 @@ Gandalf Gatace GenTwo Genkit +Gfm Gisting Glickman Gmb @@ -116,6 +117,7 @@ Hikaru Hisaki Hmmm Hogwarts +Hossain Hubmann Hyperdisk ICICI @@ -296,6 +298,7 @@ Vijay Virat Viru VirusTotal +WAI WDIR WFH WNDCLASS @@ -366,6 +369,7 @@ cicd cimg claude clickable +clsx cmap codebase codebases @@ -386,6 +390,7 @@ csa cse ctd cupertino +cva dask dataframe datname @@ -647,6 +652,7 @@ ribeye ringspun rpet rrf +rsc rsp saaagesh scann diff --git a/rag-grounding/README.md b/rag-grounding/README.md index 5f5fdc17d4..43fb1b40b9 100644 --- a/rag-grounding/README.md +++ b/rag-grounding/README.md @@ -11,12 +11,12 @@ your convenience. 
![Animated GIF showing "what is grounding"](./img/what-is-grounding.gif) -- Ungrounded generation relies on the LLM training data alone and is prone to - hallucinations when it doesn't have all the right facts -- **Grounding** a LLM with relevant facts provides fresh and potentially - private data to the model as part of it's input or prompt -- **RAG** is a technique which retrieves relevant facts, often via search, and - provides them to the LLM +- Ungrounded generation relies on the LLM training data alone and is prone to + hallucinations when it doesn't have all the right facts +- **Grounding** a LLM with relevant facts provides fresh and potentially + private data to the model as part of it's input or prompt +- **RAG** is a technique which retrieves relevant facts, often via search, and + provides them to the LLM Using RAG and Grounding to improve generations and reduce hallucinations is becoming commonplace. Doing so well and generating extremely high quality @@ -26,21 +26,27 @@ provides a platform of tools and APIs which help you build and maintain a great search engine and RAG application, and the evaluations needed to hill climb "quality". +## Building a Grounded Generation Application + +Grounded generation is crucial for enhancing the accuracy, factuality, and relevance of large language models (LLMs). By connecting LLMs to real-time data sources, including private enterprise data, grounding minimizes hallucinations and enables access to the latest information. + +The [Vertex AI Search Grounded Generation Playground](../search/grounded-generation-playground/README.md) showcases this by offering a Next.js-based interface for experimenting with the Vertex AI Search Grounded Generation API. 
It differentiates itself through features like a chat interface with multi-turn grounding, side-by-side comparison of grounded and ungrounded responses, dynamic retrieval with an adjustable threshold for cost optimization, and support for various grounding sources, including Google Search, Vertex AI Search, and custom integrations. Explore the playground to experience the power of grounded generation and learn how to integrate it into your own applications. For detailed instructions and setup, refer to the [grounded-generation-playground setup instructions](../search/grounded-generation-playground/README.md). + ## Measuring RAG/Grounding Quality See [this blog post: How to evaluate generated answers from RAG at scale on Vertex AI](https://medium.com/google-cloud/vqa-3-how-to-evaluate-generated-answers-from-rag-at-scale-on-vertex-ai-70bc397cb33d) for a more in-depth walkthrough. -- **[evaluate_rag_gen_ai_evaluation_service_sdk.ipynb](../gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb)**: - Evaluates RAG systems using the Gen AI Evaluation Service SDK, offering both - reference-free and reference-based evaluation methods with visualization. -- **[ragas_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/ragas_with_gemini.ipynb)**: - Evaluates RAG pipelines using the RAGAS framework and the Gemini Pro model - for Q&A tasks. -- **[deepeval_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/deepeval_with_gemini.ipynb)**: - Evaluates Gemini Pro's performance on a question-answering task using - DeepEval and the Vertex AI Gemini API, including Pytest integration. +- **[evaluate_rag_gen_ai_evaluation_service_sdk.ipynb](../gemini/evaluation/evaluate_rag_gen_ai_evaluation_service_sdk.ipynb)**: + Evaluates RAG systems using the Gen AI Evaluation Service SDK, offering both + reference-free and reference-based evaluation methods with visualization. 
+- **[ragas_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/ragas_with_gemini.ipynb)**: + Evaluates RAG pipelines using the RAGAS framework and the Gemini Pro model + for Q&A tasks. +- **[deepeval_with_gemini.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag-evaluation/deepeval_with_gemini.ipynb)**: + Evaluates Gemini Pro's performance on a question-answering task using + DeepEval and the Vertex AI Gemini API, including Pytest integration. ## Out of the Box RAG/Grounding @@ -50,33 +56,33 @@ you can build a RAG/Grounding system in a few clicks or a few lines of code and be ready for scale with high quality results. Vertex AI Search is an end-to-end Search engine builder, giving you Google quality search for your own data. -- **[Vertex AI Search - sample Web App](../search/web-app/)**: Take a look at - this sample web app using Vertex AI Search, which is a flexible and easy to - use "out of the box" solution for search & RAG/Grounding. -- **[bulk_question_answering.ipynb](../search/bulk-question-answering/bulk_question_answering.ipynb)**: - Processes questions from a CSV and outputs the results (top documents and - extractive answers) to a TSV file using Vertex AI Search. -- **[contract_analysis.ipynb](../search/retrieval-augmented-generation/examples/contract_analysis.ipynb)**: - Demonstrates RAG for contract analysis using Palm2, LangChain, and a vector - store, with a Gradio interface for querying contracts and retrieving answers - with source references. -- **[question_answering.ipynb](../search/retrieval-augmented-generation/examples/question_answering.ipynb)**: - Builds a question-answering system using Vertex AI Search and LangChain to - retrieve information from unstructured documents and leverage LLMs for - answering with citations. 
-- **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: - Builds a question-answering system from Google Cloud documentation using RAG - and evaluates the impact of different parameter settings on model - performance. -- **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: - Showcase specific RAG use cases -- **[search_data_blending_with_gemini_summarization.ipynb](../search/search_data_blending_with_gemini_summarization.ipynb)**: - Demonstrates calling a search app that blends information from multiple - stores (GCS, BQ, site) and summarizes search snippets and responses using - the Gemini Pro model. -- **[vertexai_search_options.ipynb](../search/vertexai-search-options/vertexai_search_options.ipynb)**: - Demonstrates three approaches for using Vertex AI Search: direct API usage, - grounding with Gemini, and integration with LangChain. +- **[Vertex AI Search - sample Web App](../search/web-app/)**: Take a look at + this sample web app using Vertex AI Search, which is a flexible and easy to + use "out of the box" solution for search & RAG/Grounding. +- **[bulk_question_answering.ipynb](../search/bulk-question-answering/bulk_question_answering.ipynb)**: + Processes questions from a CSV and outputs the results (top documents and + extractive answers) to a TSV file using Vertex AI Search. +- **[contract_analysis.ipynb](../search/retrieval-augmented-generation/examples/contract_analysis.ipynb)**: + Demonstrates RAG for contract analysis using Palm2, LangChain, and a vector + store, with a Gradio interface for querying contracts and retrieving answers + with source references. 
+- **[question_answering.ipynb](../search/retrieval-augmented-generation/examples/question_answering.ipynb)**: + Builds a question-answering system using Vertex AI Search and LangChain to + retrieve information from unstructured documents and leverage LLMs for + answering with citations. +- **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: + Builds a question-answering system from Google Cloud documentation using RAG + and evaluates the impact of different parameter settings on model + performance. +- **[rag_google_documentation.ipynb](../search/retrieval-augmented-generation/examples/rag_google_documentation.ipynb)**: + Showcase specific RAG use cases +- **[search_data_blending_with_gemini_summarization.ipynb](../search/search_data_blending_with_gemini_summarization.ipynb)**: + Demonstrates calling a search app that blends information from multiple + stores (GCS, BQ, site) and summarizes search snippets and responses using + the Gemini Pro model. +- **[vertexai_search_options.ipynb](../search/vertexai-search-options/vertexai_search_options.ipynb)**: + Demonstrates three approaches for using Vertex AI Search: direct API usage, + grounding with Gemini, and integration with LangChain. Vertex AI Search can be configured to adapt to many different use cases and data. @@ -106,24 +112,24 @@ engine, or perhaps there are constraints on what you can use. We have many component APIs which can be used to build a RAG/Grounding pipeline of your own. -- [Vertex AI APIs for building search and RAG](https://cloud.google.com/generative-ai-app-builder/docs/builder-apis) - has a list of several APIs you can use in isolation or in combination +- [Vertex AI APIs for building search and RAG](https://cloud.google.com/generative-ai-app-builder/docs/builder-apis) + has a list of several APIs you can use in isolation or in combination We have a managed service to assemble component using LlamaIndex style SDK. 
-- [LlamaIndex on Vertex](https://cloud.google.com/vertex-ai/generative-ai/docs/rag-overview) - allows you to assemble a RAG search using popular OSS framework and - components from Google or Open Source +- [LlamaIndex on Vertex](https://cloud.google.com/vertex-ai/generative-ai/docs/rag-overview) + allows you to assemble a RAG search using popular OSS framework and + components from Google or Open Source We have a few reference architectures you can use to build your own RAG/Grounding pipeline from the ground up. -- [This end-to-end DIY RAG example in a notebook](https://github.com/GoogleCloudPlatform/applied-ai-engineering-samples/blob/main/genai-on-vertex-ai/retrieval_augmented_generation/diy_rag_with_vertexai_apis/build_grounded_rag_app_with_vertex.ipynb) - written in LangChain and using some of these APIs -- The Google Cloud Architecture Center has reference architectures on - [building a RAG infrastructure with GKE](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-gke) - or - [using alloydb and a few Vertex services](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-vertex-ai) +- [This end-to-end DIY RAG example in a notebook](https://github.com/GoogleCloudPlatform/applied-ai-engineering-samples/blob/main/genai-on-vertex-ai/retrieval_augmented_generation/diy_rag_with_vertexai_apis/build_grounded_rag_app_with_vertex.ipynb) + written in LangChain and using some of these APIs +- The Google Cloud Architecture Center has reference architectures on + [building a RAG infrastructure with GKE](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-gke) + or + [using alloydb and a few Vertex services](https://cloud.google.com/architecture/rag-capable-gen-ai-app-using-vertex-ai) More coming soon. @@ -148,86 +154,81 @@ augmented generation (RAG). 
The best explanation of embeddings I've seen -- **[intro_Vertex_AI_embeddings.ipynb](../gemini/qa-ops/intro_Vertex_AI_embeddings.ipynb)**: - Introduces Vertex AI's text and multimodal embeddings APIs and demonstrates - their use in building a simple e-commerce search application with text, - image, and video queries. -- **[hybrid-search.ipynb](../embeddings/hybrid-search.ipynb)**: Demonstrates - hybrid search (combining semantic and keyword search) using Vertex AI Vector - Search. -- **[intro-textemb-vectorsearch.ipynb](../embeddings/intro-textemb-vectorsearch.ipynb)**: - Demonstrates building semantic search capabilities using Vertex AI's text - embeddings and vector search, grounding LLM outputs with real-world data. -- **[vector-search-quickstart.ipynb](../embeddings/vector-search-quickstart.ipynb)**: - Provides a quickstart tutorial for Vertex AI Vector Search, guiding users - through setting up, building, deploying, and querying a vector search index - using sample product data. -- **[bq-vector-search-log-outlier-detection.ipynb](../embeddings/use-cases/outlier-detection/bq-vector-search-log-outlier-detection.ipynb)**: - Demonstrates log anomaly detection and investigation using Vertex AI, - BigQuery, and text embeddings to identify semantically similar past actions - for outlier analysis. +- **[intro_Vertex_AI_embeddings.ipynb](../gemini/qa-ops/intro_Vertex_AI_embeddings.ipynb)**: + Introduces Vertex AI's text and multimodal embeddings APIs and demonstrates + their use in building a simple e-commerce search application with text, + image, and video queries. +- **[hybrid-search.ipynb](../embeddings/hybrid-search.ipynb)**: Demonstrates + hybrid search (combining semantic and keyword search) using Vertex AI Vector + Search. +- **[intro-textemb-vectorsearch.ipynb](../embeddings/intro-textemb-vectorsearch.ipynb)**: + Demonstrates building semantic search capabilities using Vertex AI's text + embeddings and vector search, grounding LLM outputs with real-world data. 
+- **[vector-search-quickstart.ipynb](../embeddings/vector-search-quickstart.ipynb)**: + Provides a quickstart tutorial for Vertex AI Vector Search, guiding users + through setting up, building, deploying, and querying a vector search index + using sample product data. +- **[bq-vector-search-log-outlier-detection.ipynb](../embeddings/use-cases/outlier-detection/bq-vector-search-log-outlier-detection.ipynb)**: + Demonstrates log anomaly detection and investigation using Vertex AI, + BigQuery, and text embeddings to identify semantically similar past actions + for outlier analysis. ### Gemini -- **[intro-grounding-gemini.ipynb](../gemini/grounding/intro-grounding-gemini.ipynb)**: - Demonstrates grounding LLM responses in Google Search and Vertex AI Search - using Gemini, improving response accuracy and reducing hallucinations. -- **[intro-grounding.ipynb](../language/grounding/intro-grounding.ipynb)**: - Demonstrates using Vertex AI's grounding feature to improve LLM response - accuracy and relevance by grounding them in Google Search or custom Vertex - AI Search data stores. -- **[building_DIY_multimodal_qa_system_with_mRAG.ipynb](../gemini/qa-ops/building_DIY_multimodal_qa_system_with_mRAG.ipynb)**: - Builds a custom multimodal question-answering system using mRAG. -- **[code_retrieval_augmented_generation.ipynb](../language/code/code_retrieval_augmented_generation.ipynb)**: - Demonstrates RAG for code using Gemini, LangChain, FAISS, and Vertex AI's - Embeddings API to enhance code generation by incorporating external - knowledge from the Google Cloud Generative AI GitHub repository. -- **[langchain_bigquery_data_loader.ipynb](../language/orchestration/langchain/langchain_bigquery_data_loader.ipynb)**: - Demonstrates using LangChain's BigQuery Data Loader to query BigQuery data, - integrate it with a Vertex AI LLM, and build a chain to generate and execute - SQL queries for targeted customer analysis. 
-- **[question_answering_documents.ipynb](../language/use-cases/document-qa/question_answering_documents.ipynb)**: - Demonstrates three methods (stuffing, map-reduce, and map-reduce with - embeddings) for building a question-answering system using the Vertex AI - PaLM API to efficiently handle large document datasets. -- **[question_answering_documents_langchain.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain.ipynb)**: - Demonstrates building a question-answering system using LangChain and Vertex - AI's PaLM API, comparing different methods (stuffing, map-reduce, refine) - for handling large documents, and showcasing the improved efficiency of - using similarity search with embeddings. -- **[question_answering_documents_langchain_matching_engine.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb)**: - Demonstrates a question-answering system using LangChain, Vertex AI's PaLM - API, and Matching Engine for retrieval-augmented generation, enabling - fact-grounded responses with source citations. -- **[summarization_large_documents.ipynb](../language/use-cases/document-summarization/summarization_large_documents.ipynb)**: - Demonstrates four methods (stuffing, MapReduce, MapReduce with overlapping - chunks, and MapReduce with rolling summaries) for summarizing large - documents using Vertex AI's generative models, addressing challenges of - exceeding context length limits. -- **[summarization_large_documents_langchain.ipynb](../language/use-cases/document-summarization/summarization_large_documents_langchain.ipynb)**: - Demonstrates three LangChain methods (Stuffing, MapReduce, Refine) for - summarizing large documents using Vertex AI models, comparing their - effectiveness and limitations. -- **[llamaindex_workflows.ipynb](../gemini/orchestration/llamaindex_workflows.ipynb)** Using LlamaIndex Workflows to build an event driven RAG flow. 
+- **[intro-grounding-gemini.ipynb](../gemini/grounding/intro-grounding-gemini.ipynb)**: + Demonstrates grounding LLM responses in Google Search and Vertex AI Search + using Gemini, improving response accuracy and reducing hallucinations. +- **[intro-grounding.ipynb](../language/grounding/intro-grounding.ipynb)**: + Demonstrates using the Vertex AI grounding feature to improve LLM response + accuracy and relevance by grounding them in Google Search or custom Vertex AI Search data stores. +- **[building_DIY_multimodal_qa_system_with_mRAG.ipynb](../gemini/qa-ops/building_DIY_multimodal_qa_system_with_mRAG.ipynb)**: + Builds a custom multimodal question-answering system using mRAG. +- **[code_retrieval_augmented_generation.ipynb](../language/code/code_retrieval_augmented_generation.ipynb)**: + Demonstrates RAG for code using Gemini, LangChain, FAISS, and the Vertex AI Embeddings API to enhance code generation by incorporating external + knowledge from the Google Cloud Generative AI GitHub repository. +- **[langchain_bigquery_data_loader.ipynb](../language/orchestration/langchain/langchain_bigquery_data_loader.ipynb)**: + Demonstrates using LangChain's BigQuery Data Loader to query BigQuery data, + integrate it with a Vertex AI LLM, and build a chain to generate and execute + SQL queries for targeted customer analysis. +- **[question_answering_documents.ipynb](../language/use-cases/document-qa/question_answering_documents.ipynb)**: + Demonstrates three methods (stuffing, map-reduce, and map-reduce with + embeddings) for building a question-answering system using the Vertex AI + PaLM API to efficiently handle large document datasets. 
+- **[question_answering_documents_langchain.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain.ipynb)**: + Demonstrates building a question-answering system using LangChain and the Vertex AI PaLM API, comparing different methods (stuffing, map-reduce, refine) + for handling large documents, and showcasing the improved efficiency of + using similarity search with embeddings. +- **[question_answering_documents_langchain_matching_engine.ipynb](../language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb)**: + Demonstrates a question-answering system using LangChain, the Vertex AI PaLM API, and Matching Engine for retrieval-augmented generation, enabling + fact-grounded responses with source citations. +- **[summarization_large_documents.ipynb](../language/use-cases/document-summarization/summarization_large_documents.ipynb)**: + Demonstrates four methods (stuffing, MapReduce, MapReduce with overlapping + chunks, and MapReduce with rolling summaries) for summarizing large + documents using Vertex AI generative models, addressing challenges of + exceeding context length limits. +- **[summarization_large_documents_langchain.ipynb](../language/use-cases/document-summarization/summarization_large_documents_langchain.ipynb)**: + Demonstrates three LangChain methods (Stuffing, MapReduce, Refine) for + summarizing large documents using Vertex AI models, comparing their + effectiveness and limitations. +- **[llamaindex_workflows.ipynb](../gemini/orchestration/llamaindex_workflows.ipynb)** Using LlamaIndex Workflows to build an event driven RAG flow. ### Open Models -- **[cloud_run_ollama_gemma2_rag_qa.ipynb](../open-models/serving/cloud_run_ollama_gemma2_rag_qa.ipynb)**: - Demonstrates deploying Gemma 2 on Google Cloud Run with GPU acceleration - using Ollama and LangChain, building a RAG question-answering application. 
+- **[cloud_run_ollama_gemma2_rag_qa.ipynb](../open-models/serving/cloud_run_ollama_gemma2_rag_qa.ipynb)**: + Demonstrates deploying Gemma 2 on Google Cloud Run with GPU acceleration + using Ollama and LangChain, building a RAG question-answering application. ## Agents on top of RAG -- **[tutorial_vertex_ai_search_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_vertex_ai_search_rag_agent.ipynb)**: - Demonstrates building and deploying a conversational search agent on Vertex - AI using LangChain, a reasoning engine, and RAG with Vertex AI Search to - query a movie dataset. -- **[tutorial_alloydb_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb)**: - Demonstrates deploying a RAG application using LangChain, AlloyDB for - PostgreSQL, and Vertex AI, covering setup, deployment, and cleanup. -- **[tutorial_cloud_sql_pg_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb)**: - Demonstrates deploying a RAG application using LangChain, Vertex AI, and - Cloud SQL for PostgreSQL, enabling semantic search and LLM-based responses. +- **[tutorial_vertex_ai_search_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_vertex_ai_search_rag_agent.ipynb)**: + Demonstrates building and deploying a conversational search agent on Vertex AI using LangChain, a reasoning engine, and RAG with Vertex AI Search to + query a movie dataset. +- **[tutorial_alloydb_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_alloydb_rag_agent.ipynb)**: + Demonstrates deploying a RAG application using LangChain, AlloyDB for + PostgreSQL, and Vertex AI, covering setup, deployment, and cleanup. +- **[tutorial_cloud_sql_pg_rag_agent.ipynb](../gemini/reasoning-engine/tutorial_cloud_sql_pg_rag_agent.ipynb)**: + Demonstrates deploying a RAG application using LangChain, Vertex AI, and + Cloud SQL for PostgreSQL, enabling semantic search and LLM-based responses. ## Use Cases @@ -236,17 +237,17 @@ grounding techniques in various applications. 
Feel free to dive into the notebooks that pique your interest and start building your own RAG-powered solutions. -- Examples of RAG in different domains +- Examples of RAG in different domains - - **[NLP2SQL_using_dynamic_RAG.ipynb](../gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb)** - - **[RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb](../gemini/use-cases/retrieval-augmented-generation/RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb)** - - **[code_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/code_rag.ipynb)** - - **[intra_knowledge_qna.ipynb](../gemini/use-cases/retrieval-augmented-generation/intra_knowledge_qna.ipynb)** - - **[intro_multimodal_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/intro_multimodal_rag.ipynb)** - - **[llamaindex_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/llamaindex_rag.ipynb)** - - **[multimodal_rag_langchain.ipynb](../gemini/use-cases/retrieval-augmented-generation/multimodal_rag_langchain.ipynb)** - - **[small_to_big_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb)** + - **[NLP2SQL_using_dynamic_RAG.ipynb](../gemini/use-cases/retrieval-augmented-generation/NLP2SQL_using_dynamic_RAG.ipynb)** + - **[RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb](../gemini/use-cases/retrieval-augmented-generation/RAG_Based_on_Sensitive_Data_Protection_using_Faker.ipynb)** + - **[code_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/code_rag.ipynb)** + - **[intra_knowledge_qna.ipynb](../gemini/use-cases/retrieval-augmented-generation/intra_knowledge_qna.ipynb)** + - **[intro_multimodal_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/intro_multimodal_rag.ipynb)** + - **[llamaindex_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/llamaindex_rag.ipynb)** + - 
**[multimodal_rag_langchain.ipynb](../gemini/use-cases/retrieval-augmented-generation/multimodal_rag_langchain.ipynb)** + - **[small_to_big_rag.ipynb](../gemini/use-cases/retrieval-augmented-generation/small_to_big_rag/small_to_big_rag.ipynb)** -- Build RAG systems using BigQuery - - **[rag_qna_with_bq_and_featurestore.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_qna_with_bq_and_featurestore.ipynb)** - - **[rag_vector_embedding_in_bigquery.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_vector_embedding_in_bigquery.ipynb)** +- Build RAG systems using BigQuery + - **[rag_qna_with_bq_and_featurestore.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_qna_with_bq_and_featurestore.ipynb)** + - **[rag_vector_embedding_in_bigquery.ipynb](../gemini/use-cases/retrieval-augmented-generation/rag_vector_embedding_in_bigquery.ipynb)** diff --git a/search/grounded-generation-playground/.eslintrc.json b/search/grounded-generation-playground/.eslintrc.json new file mode 100644 index 0000000000..e4cc0fcc55 --- /dev/null +++ b/search/grounded-generation-playground/.eslintrc.json @@ -0,0 +1,11 @@ +{ + "extends": [ + "eslint:recommended", + "plugin:@typescript-eslint/strict-type-checked", + "plugin:@typescript-eslint/stylistic-type-checked", + "next", + "next/core-web-vitals", + "prettier" + ], + "rules": { "react/no-unescaped-entities": 0 } +} diff --git a/search/grounded-generation-playground/.gitignore b/search/grounded-generation-playground/.gitignore new file mode 100644 index 0000000000..0563835d34 --- /dev/null +++ b/search/grounded-generation-playground/.gitignore @@ -0,0 +1,36 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+ +# dependencies +/node_modules +/.pnp +.pnp.js +.yarn/install-state.gz + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts \ No newline at end of file diff --git a/search/grounded-generation-playground/.prettierrc b/search/grounded-generation-playground/.prettierrc new file mode 100644 index 0000000000..21dd6c9875 --- /dev/null +++ b/search/grounded-generation-playground/.prettierrc @@ -0,0 +1,12 @@ +{ + "printWidth": 90, + "tabWidth": 2, + "singleQuote": true, + "trailingComma": "all", + "bracketSpacing": true, + "semi": true, + "useTabs": false, + "bracketSameLine": false, + "jsxSingleQuote": false, + "arrowParens": "always" +} diff --git a/search/grounded-generation-playground/.stylelintrc.json b/search/grounded-generation-playground/.stylelintrc.json new file mode 100644 index 0000000000..6cdaedc897 --- /dev/null +++ b/search/grounded-generation-playground/.stylelintrc.json @@ -0,0 +1,11 @@ +{ + "extends": ["stylelint-config-standard", "stylelint-config-tailwindcss"], + "rules": { + "at-rule-no-unknown": [ + true, + { + "ignoreAtRules": ["tailwind", "apply", "layer", "screen"] + } + ] + } +} diff --git a/search/grounded-generation-playground/README.md b/search/grounded-generation-playground/README.md new file mode 100644 index 0000000000..3d95faab3f --- /dev/null +++ b/search/grounded-generation-playground/README.md @@ -0,0 +1,106 @@ +# Vertex AI Search Grounded Generation Playground + +This demo showcases how to use Vertex AI [Grounded Generation API](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen) with a Next.js frontend. It provides a user-friendly interface for exploring both chat-based and side-by-side comparisons of grounded and ungrounded responses. 
This allows you to test different models and grounding sources, including [Google Search](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen#web-grounding) and [Vertex AI Search](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen#inline-vais). + +## What is Grounded Generation? + +The [Grounded Generation API](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen) addresses a key limitation of foundational Large Language Models (LLMs): their reliance on data frozen at the time of training. This means LLMs are unaware of recent information and can't access your private enterprise data, leading to potential inaccuracies or "hallucinations." Grounding connects LLMs to live, relevant data, significantly improving the factuality and usefulness of their responses. + +With RAG, we can retrieve relevant information from external sources (like Google Search or your private data) before passing the user's query to the LLM. This additional context allows the model to generate grounded, factual responses. + +## Why Grounding is Important + +- **Increased Accuracy and Factuality:** Grounded responses are based on verifiable information, reducing hallucinations and improving trustworthiness. +- **Access to Live and Private Data:** Grounding allows LLMs to use the latest web data and your enterprise's internal knowledge, enabling them to answer questions they couldn't otherwise. +- **Improved Relevance:** By providing context, grounding ensures responses are more relevant to the user's specific needs and situation. +- **Transparency and Verifiability:** Grounded responses include source citations, allowing users to easily verify the information and explore the original sources for deeper understanding. +- **Cost Optimization (Dynamic Retrieval):** Dynamic retrieval minimizes costs by only using external search when necessary. 
+- **Differentiation:** Grounding your LLM with your private data allows you to create differentiated AI experiences tailored to your business needs. + +## Grounded Generation Playground Features + +- **Chat Interface:** Engage in interactive conversations with the model, utilizing grounded generation for more factual and informative responses. The chat maintains session context, so follow-up questions can build upon previous interactions and retrieved data. This showcases the grounding in [multiple turns of a conversation capability](https://cloud.google.com/generative-ai-app-builder/docs/grounded-gen#multi-turn-generation) of the Grounded Generation API. +- **Comparison Mode:** Directly compare grounded and ungrounded responses side-by-side to see the impact of grounding on accuracy and relevance. This clearly demonstrates the benefits of grounding for various query types. +- **Model Selection:** Choose from different Gemini models, including those specifically optimized for higher factuality and groundedness. +- **Grounding Source Selection:** Toggle between different sources: + + - **Google Search:** Access up-to-date information from the web. + - **Vertex AI Search:** Ground responses using your private enterprise data stored in Vertex AI Search. This enables tailored, context-aware responses based on your internal knowledge. + - **Custom Sources:** Integrate other search engines (like ElasticSearch) or databases via cloud functions. This offers flexibility and allows you to leverage your existing search infrastructure. + - **Third-party Data (Coming Soon):** Google is collaborating with partners like Moody's, Thomson Reuters, and MSCI to provide access to curated, authoritative data sources. + +- **Retrieval Threshold (Dynamic Retrieval):** Control when Google Search is used with an adjustable threshold. This dynamic retrieval optimizes cost and latency by only performing web searches when the model's existing knowledge is insufficient. 
This is a crucial feature for cost-effective, real-world applications. +- **Code Examples:** View JavaScript, Python, and cURL examples for interacting with the Grounded Generation API, simplifying integration into your own projects. + +## Local Development + +1. **Prerequisites:** + + - Node.js and npm (or yarn/pnpm) installed. + - A Google Cloud project with the Vertex AI API enabled. + - Set up authentication: The app uses Application Default Credentials. Run `gcloud auth application-default login`. + - Set environment variables (see below). + +2. **Clone the repository:** + + ```bash + git clone REPOSITORY + cd grounded-generation-playground + ``` + +3. **Install dependencies:** + + ```bash + npm install + ``` + +4. **Set environment variables:** + + Modify the `.env` file with your project details. + + ```bash + PROJECT_ID=your-google-cloud-project-id + PROJECT_NUMBER=your-google-cloud-project-number + LOCATION=your-google-cloud-project-location (e.g., us-central1) + ``` + + Replace the placeholders with your actual project details. You can find your project number in the Google Cloud Console project settings. + +5. **Run the development server:** + + ```bash + npm run dev + ``` + + The app will be accessible at [http://localhost:3000](http://localhost:3000). + +## Deployment to Google Cloud + +This application can be deployed to Google Cloud using either App Engine or Cloud Run. Both methods are detailed below. The provided `app.yaml` file is configured for App Engine. + +### App Engine Deployment + +1. **Prerequisites:** + + - gcloud CLI installed and configured with your project. + +2. **Setup Environment Variables:** + + - Modify the `app.yaml` file to set the correct project ID, location, and project number. + +3. **Deploy:** + From the root directory of the project, run: + +```bash +gcloud app deploy +``` + +This will deploy the application to App Engine. The deployed URL will be displayed after the deployment is complete. 
+ +## Contributing + +Contributions are welcome! Please see the `CONTRIBUTING.md` file for details. + +## License + +This project is licensed under the Apache License 2.0. See the `LICENSE` file for details. diff --git a/search/grounded-generation-playground/app.yaml b/search/grounded-generation-playground/app.yaml new file mode 100644 index 0000000000..64bd386c13 --- /dev/null +++ b/search/grounded-generation-playground/app.yaml @@ -0,0 +1,31 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +runtime: nodejs +env: flex + +runtime_config: + operating_system: 'ubuntu22' + runtime_version: '20' + +service: gg-demo-nextjs +# Ensure all files are served +handlers: + - url: /.* + script: auto +# Optional: Add env variables of your choice +env_variables: + PROJECT_ID: 'your-project-id' + LOCATION: 'your-location' + PROJECT_NUMBER: 'your-project-number' diff --git a/search/grounded-generation-playground/components.json b/search/grounded-generation-playground/components.json new file mode 100644 index 0000000000..e79d1c138b --- /dev/null +++ b/search/grounded-generation-playground/components.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "tailwind.config.js", + "css": "src/app/globals.css", + "baseColor": "zinc", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + } +} diff --git a/search/grounded-generation-playground/next-env.d.ts b/search/grounded-generation-playground/next-env.d.ts new file mode 100644 index 0000000000..40c3d68096 --- /dev/null +++ b/search/grounded-generation-playground/next-env.d.ts @@ -0,0 +1,5 @@ +/// +/// + +// NOTE: This file should not be edited +// see https://nextjs.org/docs/app/building-your-application/configuring/typescript for more information. diff --git a/search/grounded-generation-playground/next.config.js b/search/grounded-generation-playground/next.config.js new file mode 100644 index 0000000000..947f3c5bbc --- /dev/null +++ b/search/grounded-generation-playground/next.config.js @@ -0,0 +1,23 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +module.exports = { + async redirects() { + return [ + // Make sure there are no conflicting redirects for /chat + ]; + }, +}; diff --git a/search/grounded-generation-playground/next.config.mjs b/search/grounded-generation-playground/next.config.mjs new file mode 100644 index 0000000000..0a380a2ae7 --- /dev/null +++ b/search/grounded-generation-playground/next.config.mjs @@ -0,0 +1,20 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** @type {import('next').NextConfig} */ +const nextConfig = {}; + +export default nextConfig; diff --git a/search/grounded-generation-playground/package-lock.json b/search/grounded-generation-playground/package-lock.json new file mode 100644 index 0000000000..cfd4562fd8 --- /dev/null +++ b/search/grounded-generation-playground/package-lock.json @@ -0,0 +1,13945 @@ +{ + "name": "gg_demo_nextjs", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "gg_demo_nextjs", + "version": "0.1.0", + "dependencies": { + "@google-cloud/vertexai": "^1.7.0", + "@radix-ui/react-accordion": "^1.2.1", + "@radix-ui/react-avatar": "^1.1.0", + "@radix-ui/react-icons": "^1.3.0", + "@radix-ui/react-label": "^2.1.0", + "@radix-ui/react-scroll-area": "^1.1.0", + "@radix-ui/react-select": "^2.1.1", + "@radix-ui/react-slider": "^1.2.0", + "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-switch": "^1.1.0", + "@radix-ui/react-tabs": "^1.1.0", + "@radix-ui/react-tooltip": "^1.1.2", + "@types/react-helmet": "^6.1.11", + "class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", + "gaxios": "^6.7.1", + "google-auth-library": "^9.14.1", + "jsonstream": "^1.0.3", + "jsonstream-next": "^3.0.0", + "lucide-react": "^0.439.0", + "next": "14.2.9", + "react": "^18", + "react-dom": "^18", + "react-markdown": "^9.0.1", + "remark-gfm": "^4.0.0", + "tailwind-merge": "^2.5.2", + "tailwindcss-animate": "^1.0.7" + }, + "devDependencies": { + "@tailwindcss/typography": "^0.5.15", + "@testing-library/jest-dom": "^6.5.0", + "@testing-library/react": "^16.0.1", + "@types/node": "^20.16.5", + "@types/node-fetch": "^2.6.11", + "@types/react": "^18", + "@types/react-dom": "^18", + "@typescript-eslint/eslint-plugin": "^8.5.0", + "@typescript-eslint/parser": "^8.5.0", + "eslint": "^8.0.0", + "eslint-config-next": "^14.2.14", + "eslint-config-prettier": "^9.1.0", + "eslint-plugin-prettier": "^5.2.1", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0", + 
"postcss": "^8", + "prettier": "^3.3.3", + "stylelint": "^16.9.0", + "stylelint-config-standard": "^36.0.1", + "stylelint-config-tailwindcss": "^0.0.7", + "tailwindcss": "^3.4.1", + "typescript": "^5" + } + }, + "node_modules/@adobe/css-tools": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.0.tgz", + "integrity": "sha512-Ff9+ksdQQB3rMncgqDK78uLznstjyfIf2Arnh22pW8kBpLs6rpKDwgnZT46hin5Hl1WzazzK64DOrhSwYpS7bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.25.4.tgz", + "integrity": 
"sha512-+LGRog6RAsCJrrrg/IO6LGmpphNe5DiK30dGjCoxxeGv49B10/3XYGxPsAwrDlMFcFEvdAUavDT8r9k/hSyQqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.25.2.tgz", + "integrity": "sha512-BBt3opiCOxUr9euZ5/ro/Xv8/V7yJ5bjYMqG/C1YAo8MIKAnumZalCN+msbci3Pigy4lIQfPUpfMM27HMGaYEA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.0", + "@babel/helper-compilation-targets": "^7.25.2", + "@babel/helper-module-transforms": "^7.25.2", + "@babel/helpers": "^7.25.0", + "@babel/parser": "^7.25.0", + "@babel/template": "^7.25.0", + "@babel/traverse": "^7.25.2", + "@babel/types": "^7.25.2", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.25.6.tgz", + "integrity": "sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.25.6", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.2.tgz", + "integrity": "sha512-U2U5LsSaZ7TAt3cfaymQ8WHh0pxvdHoEk6HVpaexxixjyEquMh0L0YNJNM6CTGKMXV1iksi0iZkGw4AcFkPaaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.25.2", + 
"@babel/helper-validator-option": "^7.24.8", + "browserslist": "^4.23.1", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.25.2.tgz", + "integrity": "sha512-BjyRAbix6j/wv83ftcVJmBt72QtHI56C7JXZoG2xATiLpmoC7dpd8WnkikExHDVPpi/3qCmO6WY1EaXOluiecQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "@babel/traverse": "^7.25.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + 
}, + "node_modules/@babel/helper-simple-access": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz", + "integrity": "sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.8.tgz", + "integrity": "sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.25.6.tgz", + "integrity": "sha512-Xg0tn4HcfTijTwfDwYlvVCl43V6h4KyVVX2aEm4qdO/PC6L2YvzLHFdmxhoeSA3eslcE6+ZVXHgWwopXYLNq4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.6" + }, + "engines": { + "node": 
">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": 
"sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.25.6.tgz", + "integrity": "sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.25.6" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": 
"sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.25.6.tgz", + "integrity": 
"sha512-sXaDXaJN9SNLymBdlWFA+bjzBhFD617ZaFiY13dGt7TVslVvVgA6fkZOP7Ki3IGElC45lwHdOTrCtKZGVAWeLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", + "integrity": "sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": 
"sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": 
"sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.25.4", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.4.tgz", + "integrity": "sha512-uMOCoHVU52BsSWxPOMVv5qKRdeSlPuImUCB2dlPuBSU+W2/ROE7/Zg8F2Kepbk+8yBa68LlRKxO+xgEVWorsDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.25.6.tgz", + "integrity": "sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.0.tgz", + "integrity": "sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.25.0", + "@babel/types": "^7.25.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.6.tgz", + "integrity": "sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.6", + "@babel/parser": "^7.25.6", + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.6", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.25.6.tgz", + "integrity": 
"sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.24.8", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.1.tgz", + "integrity": "sha512-lSquqZCHxDfuTg/Sk2hiS0mcSFCEBuj49JfzPHJogDBT0mGCyY5A1AQzBWngitrp7i1/HAZpIgzF/VjhOEIJIg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.1" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.1.tgz", + "integrity": "sha512-UBqaiu7kU0lfvaP982/o3khfXccVlHPWp0/vwwiIgDF0GmqqqxoiXC/6FCjlS9u92f7CoEz6nXKQnrn1kIAkOw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/media-query-list-parser": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-3.0.1.tgz", + "integrity": 
"sha512-HNo8gGD02kHmcbX6PvCoUuOQvn4szyB9ca63vZHKX5A81QytgDG4oxG4IaEfHTlEZSZ6MjPEMWIVU+zF2PZcgw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.1", + "@csstools/css-tokenizer": "^3.0.1" + } + }, + "node_modules/@csstools/selector-specificity": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-4.0.0.tgz", + "integrity": "sha512-189nelqtPd8++phaHNwYovKZI0FOzH1vQEE3QhHHkNIGrg5fSs9CbYP3RvfEH5geztnIA9Jwq91wyOIwAW5JIQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^6.1.0" + } + }, + "node_modules/@dual-bundle/import-meta-resolve": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@dual-bundle/import-meta-resolve/-/import-meta-resolve-4.1.0.tgz", + "integrity": "sha512-+nxncfwHM5SgAtrVzgpzJOI1ol0PkumhVo469KCf9lUi21IGcY90G98VuHm9VRrUypmAzawAHO9bs6hqeADaVg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": 
"^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.11.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.11.0.tgz", + "integrity": "sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": 
"sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/eslintrc/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/eslintrc/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.6.7", + "resolved": 
"https://registry.npmjs.org/@floating-ui/core/-/core-1.6.7.tgz", + "integrity": "sha512-yDzVT/Lm101nQ5TCVeK65LtdN7Tj4Qpr9RTXJ2vPFLqtLxwOrpoxAHAJI8J3yYWUc40J0BDBheaitK5SJmno2g==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.7" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.6.10", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.10.tgz", + "integrity": "sha512-fskgCFv8J8OamCmyun8MfjB1Olfn+uZKjOKZ0vhYF3gRmEUXcGOjxWL8bBr7i4kIuPZ2KD2S3EUIOxnjC8kl2A==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.6.0", + "@floating-ui/utils": "^0.2.7" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.1.tgz", + "integrity": "sha512-4h84MJt3CHrtG18mGsXuLCHMrug49d7DFkU0RMIyshRveBeyV2hmV/pDaF2Uxtu8kgq5r46llp5E5FQiR0K2Yg==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.0.0" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.7.tgz", + "integrity": "sha512-X8R8Oj771YRl/w+c1HqAC1szL8zWQRwFvgDwT129k9ACdBoud/+/rX9V0qiMl6LWUdP9voC2nDVZYPMQQsb6eA==", + "license": "MIT" + }, + "node_modules/@google-cloud/vertexai": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@google-cloud/vertexai/-/vertexai-1.7.0.tgz", + "integrity": "sha512-N4YcVzFQ+sPN9c3SeMhbpLfWVbeaLxPbICKsJ6yKthcr4G7tdu9pCs3HUw+Mip0M2xgiKZ8/WWvq6FXbPnlrUA==", + "license": "Apache-2.0", + "dependencies": { + "google-auth-library": "^9.1.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": 
"sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, 
+ "license": "BSD-3-Clause" + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + 
"version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/core/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/core/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + 
"url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/core/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/core/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@bcoe/v8-coverage": 
"^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/reporters/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@jest/reporters/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer 
supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@jest/reporters/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@jest/reporters/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || 
>=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": 
"https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + 
"version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@next/env": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.9.tgz", + "integrity": "sha512-hnDAoDPMii31V0ivibI8p6b023jOF1XblWTVjsDUoZKwnZlaBtJFZKDwFqi22R8r9i6W08dThUWU7Bsh2Rg8Ww==", + "license": "MIT" + }, + "node_modules/@next/eslint-plugin-next": { + "version": "14.2.14", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.14.tgz", + "integrity": "sha512-kV+OsZ56xhj0rnTn6HegyTGkoa16Mxjrpk7pjWumyB2P8JVQb8S9qtkjy/ye0GnTr4JWtWG4x/2qN40lKZ3iVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "glob": "10.3.10" + } + }, + "node_modules/@next/eslint-plugin-next/node_modules/glob": { + "version": "10.3.10", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", + "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.5", + "minimatch": "^9.0.1", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", + "path-scurry": "^1.10.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@next/eslint-plugin-next/node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + 
"@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.9.tgz", + "integrity": "sha512-/kfQifl3uLYi3DlwFlzCkgxe6fprJNLzzTUFknq3M5wGYicDIbdGlxUl6oHpVLJpBB/CBY3Y//gO6alz/K4NWA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.9.tgz", + "integrity": "sha512-tK/RyhCmOCiXQ9IVdFrBbZOf4/1+0RSuJkebXU2uMEsusS51TjIJO4l8ZmEijH9gZa0pJClvmApRHi7JuBqsRw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.9.tgz", + "integrity": "sha512-tS5eqwsp2nO7mzywRUuFYmefNZsUKM/mTG3exK2jIHv9TEVklE1SByB1KMhFkqlit1PxS9YK1tV8BOV90Wpbrw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.9.tgz", + "integrity": "sha512-8svpeTFNAMTUMKQbEzE8qRAwl9o7mNBv7LR1bmSkQvo1oy4WrNyZbhWsldOiKrc4mZ5dfQkGYsI9T75mIFMfeA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "14.2.9", + "resolved": 
"https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.9.tgz", + "integrity": "sha512-0HNulLWpKTB7H5BhHCkEhcRAnWUHeAYCftrrGw3QC18+ZywTdAoPv/zEqKy/0adqt+ks4JDdlgSQ1lNKOKjo0A==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.9.tgz", + "integrity": "sha512-hhVFViPHLAVUJRNtwwm609p9ozWajOmRvzOZzzKXgiVGwx/CALxlMUeh+M+e0Zj6orENhWLZeilOPHpptuENsA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.9.tgz", + "integrity": "sha512-p/v6XlOdrk06xfN9z4evLNBqftVQUWiyduQczCwSj7hNh8fWTbzdVxsEiNOcajMXJbQiaX/ZzZdFgKVmmJnnGQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.9.tgz", + "integrity": "sha512-IcW9dynWDjMK/0M05E3zopbRen7v0/yEaMZbHFOSS1J/w+8YG3jKywOGZWNp/eCUVtUUXs0PW+7Lpz8uLu+KQA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.9.tgz", + "integrity": "sha512-gcbpoXyWZdVOBgNa5BRzynrL5UR1nb2ZT38yKgnphYU9UHjeecnylMHntrQiMg/QtONDcJPFC/PmsS47xIRYoA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nolyfill/is-core-module": { + "version": "1.0.39", + "resolved": "https://registry.npmjs.org/@nolyfill/is-core-module/-/is-core-module-1.0.39.tgz", + "integrity": "sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.4.0" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@pkgr/core": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz", + "integrity": 
"sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, + "node_modules/@radix-ui/number": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.0.tgz", + "integrity": "sha512-V3gRzhVNU1ldS5XhAPTom1fOIo4ccrjjJgmE+LI2h/WaFpHmx0MQApT+KZHnx8abG6Avtfcz4WoEciMnpFT3HQ==", + "license": "MIT" + }, + "node_modules/@radix-ui/primitive": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.0.tgz", + "integrity": "sha512-4Z8dn6Upk0qk4P74xBhZ6Hd/w0mPEzOOLxy4xiPXOXqjF7jZS0VAKk7/x/H6FyY2zCkYJqePf1G5KmkmNJ4RBA==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-accordion": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accordion/-/react-accordion-1.2.1.tgz", + "integrity": "sha512-bg/l7l5QzUjgsh8kjwDFommzAshnUsuVMV5NM56QVCm+7ZckYdd9P/ExR8xG/Oup0OajVxNLaHJ1tb8mXk+nzQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-collapsible": "1.1.1", + "@radix-ui/react-collection": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.1", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-controllable-state": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-accordion/node_modules/@radix-ui/react-context": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.1.tgz", + "integrity": "sha512-UASk9zi+crv9WteK/NU4PLvOoL3OuE6BWVKNF6hPRBtYBDXQ2u5iu3O59zUlJiTVvkyuycnqrztsHVJwcK9K+Q==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.0.tgz", + "integrity": "sha512-FmlW1rCg7hBpEBwFbjHwCW6AmWLQM6g/v0Sn8XbP9NvmSZ2San1FpQeyPtufzOMSIx7Y4dzjlHoifhp+7NkZhw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-avatar": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.0.tgz", + "integrity": "sha512-Q/PbuSMk/vyAd/UoIShVGZ7StHHeRFYU7wXmi5GV+8cLXflZAEpHL/F697H1klrzxKXNtZ97vWiC0q3RKUH8UA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collapsible": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.1.tgz", + "integrity": "sha512-1///SnrfQHJEofLokyczERxQbWfCGQlQ2XsCZMucVs6it+lq9iw4vXy+uDn1edlb58cOZOWSldnfPAYcT4O/Yg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.1", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-presence": "1.1.1", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/react-context": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.1.tgz", + "integrity": "sha512-UASk9zi+crv9WteK/NU4PLvOoL3OuE6BWVKNF6hPRBtYBDXQ2u5iu3O59zUlJiTVvkyuycnqrztsHVJwcK9K+Q==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collapsible/node_modules/@radix-ui/react-presence": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.1.tgz", + "integrity": "sha512-IeFXVi4YS1K0wVZzXNrbaaUvIJ3qdY+/Ih4eHFhWA9SwGR9UDX7Ck8abvL57C4cv3wwMvUE0OG69Qc3NCcTe/A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || 
^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.0.tgz", + "integrity": "sha512-GZsZslMJEyo1VKm5L1ZJY8tGDxZNPAoUeQUIbKeJfoi7Q4kmig5AsgLMYYuyYbfjd8fBmFORAIwYAkXMnXZgZw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-slot": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.0.tgz", + "integrity": "sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.0.tgz", + "integrity": "sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + 
"optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.0.tgz", + "integrity": "sha512-BUuBvgThEiAXh2DWu93XsT+a3aWrGqolGlqqw5VU1kG7p/ZH2cuDlM1sRLNnY3QcBS69UIz2mcKhMxDsdewhjg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.0.tgz", + "integrity": "sha512-/UovfmmXGptwGcBQawLzvn2jOfM0t4z3/uKffoBlj724+n3FvBbZ7M0aaBOmkp6pqFYpO4yx8tSVJjx3Fl2jig==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-escape-keydown": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.0.tgz", + "integrity": "sha512-w6XZNUPVv6xCpZUqb/yN9DL6auvpGX3C/ee6Hdi16v2UUy25HV2Q5bcflsiDyT/g5RwbPQ/GIT1vLkeRb+ITBw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.0.tgz", + "integrity": "sha512-200UD8zylvEyL8Bx+z76RJnASR2gRMuxlgFCPAe/Q/679a/r0eK3MBVYMb7vZODZcffZBdob1EGnky78xmVvcA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-icons": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.0.tgz", + "integrity": "sha512-jQxj/0LKgp+j9BiTXz3O3sgs26RNet2iLWmsPyRz2SIcR4q/4SbazXfnYwbAr+vLYKSfc7qxzyGQA1HLlYiuNw==", + "license": "MIT", + "peerDependencies": { + "react": "^16.x || ^17.x || ^18.x" + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.0.tgz", + "integrity": "sha512-EJUrI8yYh7WOjNOqpoJaf1jlFIH2LvtgAl+YcFqNCa+4hj64ZXmPkAKOFs/ukjz3byN6bdb/AVUqHkI8/uWWMA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-label": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.0.tgz", + "integrity": "sha512-peLblDlFw/ngk3UWq0VnYaOLy6agTZZ+MUO/WhVfm14vJGML+xH4FAl2XQGLqdefjNb7ApRg6Yn7U42ZhmYXdw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.0" + }, + "peerDependencies": { + 
"@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.0.tgz", + "integrity": "sha512-ZnRMshKF43aBxVWPWvbj21+7TQCvhuULWJ4gNIKYpRlQt5xGRhLx66tMp8pya2UkGHTSlhpXwmjqltDYHhw7Vg==", + "license": "MIT", + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0", + "@radix-ui/react-use-rect": "1.1.0", + "@radix-ui/react-use-size": "1.1.0", + "@radix-ui/rect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.1.tgz", + "integrity": "sha512-A3UtLk85UtqhzFqtoC8Q0KvR2GbXF3mtPgACSazajqq6A41mEQgo53iPzY4i6BwDxlIFqWIhiQ2G729n+2aw/g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + 
"@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.0.tgz", + "integrity": "sha512-Gq6wuRN/asf9H/E/VzdKoUtT8GC9PQc9z40/vEr0VCJ4u5XvvhWIrSsCB6vD2/cH7ugTdSfYq9fLJCcM00acrQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.0.tgz", + "integrity": "sha512-ZSpFm0/uHa8zTvKBDjLFWLo8dkr4MBsiDLz0g3gMUwqgLHz9rTaRRGYDgvZPtBJgYCBKXkS9fzmoySgr8CO6Cw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.0.tgz", + "integrity": "sha512-EA6AMGeq9AEeQDeSH0aZgG198qkfHSbvWTf1HvoDmOB5bBG/qTxjYMWUKMnYiV6J/iP/J8MEFSuB2zRU2n7ODA==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-collection": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + 
"@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-scroll-area": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.1.0.tgz", + "integrity": "sha512-9ArIZ9HWhsrfqS765h+GZuLoxaRHD/j0ZWOWilsCvYTpYJp8XwCqNG7Dt9Nu/TItKOdgLGkOPCodQvDc+UMwYg==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.0", + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-presence": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-select": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.1.1.tgz", + "integrity": "sha512-8iRDfyLtzxlprOo9IicnzvpsO1wNCkuwzzCM+Z5Rb5tNOpCdMvcc2AkzX0Fz+Tz9v6NJ5B/7EEgyZveo4FBRfQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.0", + "@radix-ui/primitive": "1.1.0", + 
"@radix-ui/react-collection": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-dismissable-layer": "1.1.0", + "@radix-ui/react-focus-guards": "1.1.0", + "@radix-ui/react-focus-scope": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-popper": "1.2.0", + "@radix-ui/react-portal": "1.1.1", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-slot": "1.1.0", + "@radix-ui/react-use-callback-ref": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0", + "@radix-ui/react-use-previous": "1.1.0", + "@radix-ui/react-visually-hidden": "1.1.0", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.7" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slider": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.2.0.tgz", + "integrity": "sha512-dAHCDA4/ySXROEPaRtaMV5WHL8+JB/DbtyTbJjYkY0RXmKMO2Ln8DFZhywG5/mVQ4WqHDBc8smc14yPXPqZHYA==", + "license": "MIT", + "dependencies": { + "@radix-ui/number": "1.1.0", + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-collection": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-use-layout-effect": "1.1.0", + "@radix-ui/react-use-previous": "1.1.0", + "@radix-ui/react-use-size": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz", + "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-switch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.1.0.tgz", + "integrity": "sha512-OBzy5WAj641k0AOSpKQtreDMe+isX0MQJ1IVyF03ucdF3DunOnROVrjWs8zsXUxC3zfZ6JL9HFVCUlMghz9dJw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-use-previous": "1.1.0", + "@radix-ui/react-use-size": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tabs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.0.tgz", + "integrity": "sha512-bZgOKB/LtZIij75FSuPzyEti/XBhJH52ExgtdVqjCIh+Nx/FW+LhnbXtbCzIi34ccyMsyOja8T0thCzoHFXNKA==", + "license": "MIT", + "dependencies": { + 
"@radix-ui/primitive": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-direction": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-presence": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-roving-focus": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.2.tgz", + "integrity": "sha512-9XRsLwe6Yb9B/tlnYCPVUd/TFS4J7HuOZW345DCeC6vKIxQGMZdx21RK4VoZauPD5frgkXTYVS5y90L+3YBn4w==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.0", + "@radix-ui/react-dismissable-layer": "1.1.0", + "@radix-ui/react-id": "1.1.0", + "@radix-ui/react-popper": "1.2.0", + "@radix-ui/react-portal": "1.1.1", + "@radix-ui/react-presence": "1.1.0", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-slot": "1.1.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-visually-hidden": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz", + "integrity": 
"sha512-CasTfvsy+frcFkbXtSJ2Zu9JHpN8TYKxkgJGWbjiZhFivxaeW7rMeZt7QELGVLaYVfFMsKHjb7Ak0nMEe+2Vfw==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.1.0.tgz", + "integrity": "sha512-MtfMVJiSr2NjzS0Aa90NPTnvTSg6C/JLCV7ma0W6+OMV78vd8OyRpID+Ng9LxzsPbLeuBnWBA1Nq30AtBIDChw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", + "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.0.tgz", + "integrity": "sha512-+FPE0rOdziWSrH9athwI1R0HDVbWlEhd+FR+aSDk4uWGmSJ9Z54sdZVDQPZAinJhJXwfT+qnj969mCsT2gfm5w==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + 
"optional": true + } + } + }, + "node_modules/@radix-ui/react-use-previous": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.0.tgz", + "integrity": "sha512-Z/e78qg2YFnnXcW88A4JmTtm4ADckLno6F7OXotmkQfeuCVaKuYzqAATPhVzl3delXE7CxIV8shofPn3jPc5Og==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.0.tgz", + "integrity": "sha512-0Fmkebhr6PiseyZlYAOtLS+nb7jLmpqTrJyv61Pe68MKYW6OWdRE2kI70TaYY27u7H0lajqM3hSMMLFq18Z7nQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/rect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.0.tgz", + "integrity": "sha512-XW3/vWuIXHa+2Uwcc2ABSfcCledmXhhQPlGbfcRXbiUQI5Icjcg19BGCZVKKInYbvUCut/ufbbLLPFC5cbb1hw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.0.tgz", + "integrity": "sha512-N8MDZqtgCgG5S3aV60INAB475osJousYpZ4cTJ2cFbMpdHS5Y6loLTH8LPtkj2QN0x93J30HT/M3qJXM0+lyeQ==", + "license": "MIT", + "dependencies": { + 
"@radix-ui/react-primitive": "2.0.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.0.tgz", + "integrity": "sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg==", + "license": "MIT" + }, + "node_modules/@rtsao/scc": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz", + "integrity": "sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.10.4", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.4.tgz", + "integrity": "sha512-WJgX9nzTqknM393q1QJDJmoW28kUfEnybeTfVNcNAPnIx210RXm2DiXiHzfNPJNIUUb1tJnz/l4QGtJ30PgWmA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": 
"https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/typography": { + "version": "0.5.15", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.15.tgz", + "integrity": "sha512-AqhlCXl+8grUz8uqExv5OTtgpjuVIwFTSXTrh8y9/pw6q2ek7fJ+Y8ZEVw7EB2DCcuCOtEjf9w3+J3rzts01uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash.castarray": "^4.4.0", + "lodash.isplainobject": "^4.0.6", + "lodash.merge": "^4.6.2", + "postcss-selector-parser": "6.0.10" + }, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20" + } + }, + "node_modules/@tailwindcss/typography/node_modules/postcss-selector-parser": { + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", + "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@testing-library/dom": { + 
"version": "10.4.0", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.0.tgz", + "integrity": "sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "chalk": "^4.1.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@testing-library/jest-dom": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.5.0.tgz", + "integrity": "sha512-xGGHpBXYSHUUr6XsKBfs85TWlYKpTc37cSBBVrXcib2MkHLboWlkClhWF37JKlDb9KEq3dHs+f2xR7XJEWGBxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "^4.4.0", + "aria-query": "^5.0.0", + "chalk": "^3.0.0", + "css.escape": "^1.5.1", + "dom-accessibility-api": "^0.6.3", + "lodash": "^4.17.21", + "redent": "^3.0.0" + }, + "engines": { + "node": ">=14", + "npm": ">=6", + "yarn": ">=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": 
"^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz", + "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@testing-library/react": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.0.1.tgz", + "integrity": "sha512-dSmwJVtJXmku+iocRhWOUFbrERC76TX2Mnf0ATODz8brzAZrMBbzLwQixlBSanZxR6LddK3eiwpSFZgDET1URg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0", + "@types/react-dom": "^18.0.0", + "react": "^18.0.0", + "react-dom": "^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/@types/aria-query": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": 
"sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.8", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz", + "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.6", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.6.tgz", + "integrity": "sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": 
"sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + 
"integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jsdom": { + "version": "20.0.1", + "resolved": "https://registry.npmjs.org/@types/jsdom/-/jsdom-20.0.1.tgz", + "integrity": "sha512-d0r18sZPmMQr1eG35u12FZfhIXNrnsPU/g5wvRKCUf/tOGilKKwYMYGqh33BNR6ba+2gkHw1EUiHoN3mn7E5IQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/tough-cookie": "*", + "parse5": "^7.0.0" + } + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", + "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.16.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.16.5.tgz", + "integrity": "sha512-VwYCweNo3ERajwy0IUlqqcyZ8/A7Zwa9ZP3MnENWcB11AejO+tLy3pu850goUW2FC/IJMdZUfKpX/yxL1gymCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.19.2" + } + }, + "node_modules/@types/node-fetch": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": 
"sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, + "node_modules/@types/prop-types": { + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.5", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.5.tgz", + "integrity": "sha512-WeqMfGJLGuLCqHGYRGHxnKrXcTitc6L/nBUWfWPcTarG3t9PsquqUMuVeXZeca+mglY4Vo5GZjCi0A3Or2lnxA==", + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.0", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", + "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/react-helmet": { + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/@types/react-helmet/-/react-helmet-6.1.11.tgz", + "integrity": "sha512-0QcdGLddTERotCXo3VFlUSWO3ztraw8nZ6e3zJSgG7apwV5xt+pJUS8ewPBqT4NYB1optGLprNQzFleIY84u/g==", + "license": "MIT", + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/tough-cookie": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz", + "integrity": 
"sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.5.0.tgz", + "integrity": "sha512-lHS5hvz33iUFQKuPFGheAB84LwcJ60G8vKnEhnfcK1l8kGVLro2SFYW6K0/tj8FUhRJ0VHyg1oAfg50QGbPPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.5.0", + "@typescript-eslint/type-utils": "8.5.0", + "@typescript-eslint/utils": "8.5.0", + "@typescript-eslint/visitor-keys": "8.5.0", + "graphemer": "^1.4.0", + "ignore": "^5.3.1", + "natural-compare": "^1.4.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", + "eslint": "^8.57.0 
|| ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.5.0.tgz", + "integrity": "sha512-gF77eNv0Xz2UJg/NbpWJ0kqAm35UMsvZf1GHj8D9MRFTj/V3tAciIWXfmPLsAAF/vUlpWPvUDyH1jjsr0cMVWw==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "8.5.0", + "@typescript-eslint/types": "8.5.0", + "@typescript-eslint/typescript-estree": "8.5.0", + "@typescript-eslint/visitor-keys": "8.5.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.5.0.tgz", + "integrity": "sha512-06JOQ9Qgj33yvBEx6tpC8ecP9o860rsR22hWMEd12WcTRrfaFgHr2RB/CA/B+7BMhHkXT4chg2MyboGdFGawYg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.5.0", + "@typescript-eslint/visitor-keys": "8.5.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.5.0.tgz", + "integrity": "sha512-N1K8Ix+lUM+cIDhL2uekVn/ZD7TZW+9/rwz8DclQpcQ9rk4sIL5CAlBC0CugWKREmDjBzI/kQqU4wkg46jWLYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "8.5.0", + "@typescript-eslint/utils": "8.5.0", + "debug": 
"^4.3.4", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.5.0.tgz", + "integrity": "sha512-qjkormnQS5wF9pjSi6q60bKUHH44j2APxfh9TQRXK8wbYVeDYYdYJGIROL87LGZZ2gz3Rbmjc736qyL8deVtdw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.5.0.tgz", + "integrity": "sha512-vEG2Sf9P8BPQ+d0pxdfndw3xIXaoSjliG0/Ejk7UggByZPKXmJmw3GW5jV2gHNQNawBUyfahoSiCFVov0Ruf7Q==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "8.5.0", + "@typescript-eslint/visitor-keys": "8.5.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": 
">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.5.0.tgz", + "integrity": "sha512-6yyGYVL0e+VzGYp60wvkBHiqDWOpT63pdMV2CVG4LVDd5uR6q1qQN/7LafBZtAtNIn/mqXjsSeS5ggv/P0iECw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.5.0", + "@typescript-eslint/types": "8.5.0", + "@typescript-eslint/typescript-estree": "8.5.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.5.0.tgz", + "integrity": "sha512-yTPqMnbAZJNy2Xq2XU8AdtOW9tJIr+UQb64aXB9f3B1498Zx9JorVgFJcZpEc9UBuCCrdzKID2RGAMkYcDtZOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.5.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "license": "ISC" + }, + "node_modules/abab": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", + "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", + "deprecated": "Use your platform's native atob() and btoa() methods instead", + "dev": true, + "license": 
"BSD-3-Clause" + }, + "node_modules/acorn": { + "version": "8.12.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", + "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-globals": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-7.0.1.tgz", + "integrity": "sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.1.0", + "acorn-walk": "^8.0.2" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/agent-base": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz", + "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==", + "license": "MIT", + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", 
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "license": "MIT" + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/aria-hidden": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.4.tgz", + "integrity": "sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz", + "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", + "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz", + "integrity": "sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + 
"define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz", + "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz", + "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": 
"1.0.3", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz", + "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": "^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ast-types-flow": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": "sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/axe-core": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.0.tgz", + "integrity": "sha512-Mr2ZakwQ7XUAjp7pAwQWRhhK8mQQ6JAaNWSjmjxil0R8BPioMtQsTLOolGYkji1rcL++3dCqZA3zWqpT+9Ew6g==", + "dev": true, + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + 
"version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz", + "integrity": "sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": 
"^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bignumber.js": { + 
"version": "9.1.2", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.1.2.tgz", + "integrity": "sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": 
"^1.5.4", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001660", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001660.tgz", + "integrity": "sha512-GacvNTTuATm26qC74pt+ad1fW15mlQ/zuTzzY1ZoIzECTP8HURDfF43kNxPgf7H1jmelCBQTTbBNxdSXOA7Bqg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + 
"node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": 
"sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.1.tgz", + "integrity": "sha512-cuSVIHi9/9E/+821Qjdvngor+xpnlwnuwIyZOaLmHBVdXL+gP+I6QQB9VkO7RI77YIcTV+S1W9AreJ5eN63JBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/class-variance-authority": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.0.tgz", + "integrity": "sha512-jFI8IQw4hczaL4ALINxqLEXQbWcNjoSkloa4IaufXCJr6QawJyw7tuRysRsrE8w2p/4gGaxKIt/hX3qz/IbD1A==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "2.0.0" + }, + "funding": { + "url": "https://joebell.co.uk" + } + }, + "node_modules/class-variance-authority/node_modules/clsx": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", + "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + 
"node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": 
"sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cosmiconfig": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", + "integrity": "sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cosmiconfig/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/cosmiconfig/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": 
"bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/css-functions-list": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/css-functions-list/-/css-functions-list-3.2.2.tgz", + "integrity": "sha512-c+N0v6wbKVxTu5gOBBFkr9BEdBWaqqjQeiJ8QvSRIJOf+UxlJh930m8e6/WNeODIK0mYLFkoONrnj16i2EcvfQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12 || >=16" + } + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssom": { + "version": "0.5.0", + "resolved": 
"https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz", + "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssstyle": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz", + "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssom": "~0.3.6" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cssstyle/node_modules/cssom": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", + "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", + "dev": true, + "license": "MIT" + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "license": "MIT" + }, + "node_modules/damerau-levenshtein": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", + "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/data-urls": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz", + "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "abab": "^2.0.6", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/data-urls/node_modules/tr46": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", + 
"integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/data-urls/node_modules/webidl-conversions": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/data-urls/node_modules/whatwg-url": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz", + "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^3.0.0", + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/data-view-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.1.tgz", + "integrity": "sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz", + "integrity": "sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, 
+ "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz", + "integrity": "sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", + "dev": true, + "license": "MIT" + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dedent": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz", + "integrity": 
"sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-equal": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.3.tgz", + "integrity": "sha512-ZIwpnevOurS8bpT4192sqAowWM76JDKSHYzMLty3BZGSswgq6pBaH3DhCSW5xVAZICZyKdOBPjwww5wfgT/6PA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "call-bind": "^1.0.5", + "es-get-iterator": "^1.1.3", + "get-intrinsic": "^1.2.2", + "is-arguments": "^1.1.1", + "is-array-buffer": "^3.0.2", + "is-date-object": "^1.0.5", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.2", + "isarray": "^2.0.5", + "object-is": "^1.1.5", + "object-keys": "^1.1.1", + "object.assign": "^4.1.4", + "regexp.prototype.flags": "^1.5.1", + "side-channel": "^1.0.4", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": 
"https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", 
+ "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + "license": "MIT" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "license": "Apache-2.0" + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + 
"integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dom-accessibility-api": { + "version": "0.5.16", + "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", + "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/domexception": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", + "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", + "deprecated": "Use your platform's native DOMException instead", + "dev": true, + "license": "MIT", + "dependencies": { + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/domexception/node_modules/webidl-conversions": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + 
"dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.22", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.22.tgz", + "integrity": "sha512-tKYm5YHPU1djz0O+CGJ+oJIvimtsCcwR2Z9w7Skh08lUdyzXY5djods3q+z2JkWdb7tCcmM//eVavSRAiaPRNg==", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/enhanced-resolve": { + "version": "5.17.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", + "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + 
"integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-abstract": { + "version": "1.23.3", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz", + "integrity": "sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "data-view-buffer": "^1.0.1", + "data-view-byte-length": "^1.0.1", + "data-view-byte-offset": "^1.0.0", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.0.3", + "es-to-primitive": "^1.2.1", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", + "globalthis": "^1.0.3", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "hasown": "^2.0.2", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.1", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.3", + "is-string": "^1.0.7", + "is-typed-array": "^1.1.13", + "is-weakref": "^1.0.2", + "object-inspect": "^1.13.1", + "object-keys": "^1.1.1", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.2", + "safe-regex-test": "^1.0.3", + 
"string.prototype.trim": "^1.2.9", + "string.prototype.trimend": "^1.0.8", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": "^1.0.2", + "typed-array-length": "^1.0.6", + "unbox-primitive": "^1.0.2", + "which-typed-array": "^1.1.15" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-get-iterator": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", + "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.3", + "has-symbols": "^1.0.3", + "is-arguments": "^1.1.1", + "is-map": "^2.0.2", + "is-set": "^2.0.2", + "is-string": "^1.0.7", + "isarray": "^2.0.5", + "stop-iteration-iterator": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.19.tgz", + "integrity": 
"sha512-zoMwbCcH5hwUkKJkT8kDIBZSz9I6mVG//+lDCinLCGov4+r7NIy0ld8o03M0cJxl2spVf6ESYVS6/gpIfq1FFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "iterator.prototype": "^1.1.2", + "safe-array-concat": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz", + "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz", + "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", + "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.0" + } + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": 
"sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + 
"@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-config-next": { + "version": "14.2.14", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.14.tgz", + "integrity": "sha512-TXwyjGICAlWC9O0OufS3koTsBKQH8l1xt3SY/aDuvtKHIwjTHplJKWVb1WOEX0OsDaxGbFXmfD2EY1sNfG0Y/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@next/eslint-plugin-next": "14.2.14", + "@rushstack/eslint-patch": "^1.3.3", + "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-import-resolver-typescript": "^3.5.2", + "eslint-plugin-import": "^2.28.1", + "eslint-plugin-jsx-a11y": "^6.7.1", + "eslint-plugin-react": 
"^7.33.2", + "eslint-plugin-react-hooks": "^4.5.0 || 5.0.0-canary-7118f5dd7-20230705" + }, + "peerDependencies": { + "eslint": "^7.23.0 || ^8.0.0", + "typescript": ">=3.3.1" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-config-prettier": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.1.0.tgz", + "integrity": "sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==", + "dev": true, + "license": "MIT", + "bin": { + "eslint-config-prettier": "bin/cli.js" + }, + "peerDependencies": { + "eslint": ">=7.0.0" + } + }, + "node_modules/eslint-import-resolver-node": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.2.7", + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" + } + }, + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-import-resolver-typescript": { + "version": "3.6.3", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.6.3.tgz", + "integrity": "sha512-ud9aw4szY9cCT1EWWdGv1L1XR6hh2PaRWif0j2QjQ0pgTY/69iw+W0Z4qZv5wHahOl8isEr+k/JnyAqNQkLkIA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@nolyfill/is-core-module": "1.0.39", + "debug": "^4.3.5", + "enhanced-resolve": "^5.15.0", + "eslint-module-utils": "^2.8.1", + "fast-glob": "^3.3.2", + 
"get-tsconfig": "^4.7.5", + "is-bun-module": "^1.0.2", + "is-glob": "^4.0.3" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts/projects/eslint-import-resolver-ts" + }, + "peerDependencies": { + "eslint": "*", + "eslint-plugin-import": "*", + "eslint-plugin-import-x": "*" + }, + "peerDependenciesMeta": { + "eslint-plugin-import": { + "optional": true + }, + "eslint-plugin-import-x": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.0.tgz", + "integrity": "sha512-wALZ0HFoytlyh/1+4wuZ9FJCD/leWHQzzrxJ8+rebyReSLk7LApMyd3WJaLVoN+D5+WIdJyDK1c6JnE65V4Zyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^3.2.7" + }, + "engines": { + "node": ">=4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.30.0.tgz", + "integrity": "sha512-/mHNE9jINJfiD2EKkg1BKyPyUk4zdnT54YgbOgfjSakWT5oyX/qQLVNTkehyfpcMxZXMy1zyonZ2v7hZTX43Yw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rtsao/scc": "^1.1.0", + "array-includes": "^3.1.8", + "array.prototype.findlastindex": "^1.2.5", + "array.prototype.flat": "^1.3.2", + "array.prototype.flatmap": "^1.3.2", + "debug": "^3.2.7", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.9.0", + "hasown": "^2.0.2", + "is-core-module": "^2.15.1", + "is-glob": "^4.0.3", 
+ "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "object.groupby": "^1.0.3", + "object.values": "^1.2.0", + "semver": "^6.3.1", + "tsconfig-paths": "^3.15.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" + } + }, + "node_modules/eslint-plugin-import/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-import/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint-plugin-jsx-a11y": { + "version": "6.10.0", + "resolved": 
"https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.0.tgz", + "integrity": "sha512-ySOHvXX8eSN6zz8Bywacm7CvGNhUtdjvqfQDVe6020TUK34Cywkw7m0KsCCk1Qtm9G1FayfTN1/7mMYnYO2Bhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "aria-query": "~5.1.3", + "array-includes": "^3.1.8", + "array.prototype.flatmap": "^1.3.2", + "ast-types-flow": "^0.0.8", + "axe-core": "^4.10.0", + "axobject-query": "^4.1.0", + "damerau-levenshtein": "^1.0.8", + "emoji-regex": "^9.2.2", + "es-iterator-helpers": "^1.0.19", + "hasown": "^2.0.2", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.8", + "safe-regex-test": "^1.0.3", + "string.prototype.includes": "^2.0.0" + }, + "engines": { + "node": ">=4.0" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9" + } + }, + "node_modules/eslint-plugin-jsx-a11y/node_modules/aria-query": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.1.3.tgz", + "integrity": "sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "deep-equal": "^2.0.5" + } + }, + "node_modules/eslint-plugin-jsx-a11y/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint-plugin-jsx-a11y/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": 
{ + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint-plugin-prettier": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz", + "integrity": "sha512-gH3iR3g4JfF+yYPaJYkN7jEl9QbweL/YfkoRlNnuIEHEz1vHVlCmWOS+eGGiRuzHQXdJFCOTxRgvju9b8VUmrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "prettier-linter-helpers": "^1.0.0", + "synckit": "^0.9.1" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint-plugin-prettier" + }, + "peerDependencies": { + "@types/eslint": ">=8.0.0", + "eslint": ">=8.0.0", + "eslint-config-prettier": "*", + "prettier": ">=3.0.0" + }, + "peerDependenciesMeta": { + "@types/eslint": { + "optional": true + }, + "eslint-config-prettier": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.37.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.1.tgz", + "integrity": "sha512-xwTnwDqzbDRA8uJ7BMxPs/EXRB3i8ZfnOIp8BsxEQkT0nHPp+WWceqGgo6rKb9ctNi8GJLDT4Go5HAWELa/WMg==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.8", + "array.prototype.findlast": "^1.2.5", + "array.prototype.flatmap": "^1.3.2", + "array.prototype.tosorted": "^1.1.4", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.19", + "estraverse": "^5.3.0", + "hasown": "^2.0.2", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.8", + "object.fromentries": "^2.0.8", + "object.values": "^1.2.0", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.11", + "string.prototype.repeat": "^1.0.0" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.2", + "resolved": 
"https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": 
"^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + 
"node_modules/eslint/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR 
CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": 
"https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-diff": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.3.0.tgz", + "integrity": "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + 
"micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.2.tgz", + "integrity": "sha512-GR6f0hD7XXyNJa25Tb9BuIdN0tdr+0BMi6/CJPH3wJO1JjNG3n/VsSw38AwRdKZABm8lGbPfakLRkYzx2V9row==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "license": "ISC", + "dependencies": { + 
"reusify": "^1.0.4" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + 
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true, + "license": "ISC" + }, + "node_modules/for-each": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", + "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + 
"license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gaxios": { + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", + "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", + "license": "Apache-2.0", + "dependencies": { + "extend": "^3.0.2", + "https-proxy-agent": "^7.0.1", + "is-stream": "^2.0.0", + "node-fetch": "^2.6.9", + "uuid": "^9.0.1" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gcp-metadata": { + "version": "6.1.0", + "resolved": 
"https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.0.tgz", + "integrity": "sha512-Jh/AIwwgaxan+7ZUUmRLCjtchyDiqh4KjBJ5tW3plBZb5iL/BPcso8A5DlzeD9qlw0duCamnNdpFjxwaT0KyKg==", + "license": "Apache-2.0", + "dependencies": { + "gaxios": "^6.0.0", + "json-bigint": "^1.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": 
"https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-symbol-description": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", + "integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.8.1.tgz", + "integrity": "sha512-k9PN+cFBmaLWtVz29SkUoqU5O0slLuHJXt/2P+tMVFT+phsSGXGkp9t3rQIqdz0e+06EHNGs3oM6ZX1s2zHxRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + 
"package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "license": "MIT", 
+ "engines": { + "node": ">=4" + } + }, + "node_modules/globalthis": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.1", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globjoin": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/globjoin/-/globjoin-0.1.4.tgz", + "integrity": "sha512-xYfnw62CKG8nLkZBfWbhWwDw02CHty86jfPcc2cr3ZfeuK9ysoVPPEUxf21bAD/rWAgk52SuBrLJlefNy8mvFg==", + "dev": true, + "license": "MIT" + }, + "node_modules/google-auth-library": { + "version": "9.14.1", + "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-9.14.1.tgz", + "integrity": "sha512-Rj+PMjoNFGFTmtItH7gHfbHpGVSb3vmnGK3nwNBqxQF9NoBpttSZI/rc0WiM63ma2uGDQtYEkMHkK9U6937NiA==", + "license": "Apache-2.0", + "dependencies": { + "base64-js": "^1.3.0", + "ecdsa-sig-formatter": "^1.0.11", + "gaxios": "^6.1.1", + "gcp-metadata": "^6.1.0", + "gtoken": "^7.0.0", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": 
"sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/gtoken": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz", + "integrity": "sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==", + "license": "MIT", + "dependencies": { + "gaxios": "^6.0.0", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/has-bigints": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", + "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + 
"version": "2.3.0", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", + "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + 
"dev": true, + "license": "MIT" + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-url-attributes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.0.tgz", + "integrity": "sha512-/sXbVCWayk6GDVg3ctOX6nxaVj7So40FcFAnWlWGNAB1LpYKcV5Cd10APjPjW80O7zYW2MsjBV4zZ7IZO5fVow==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/http-proxy-agent/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz", + "integrity": "sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "4" + }, + 
"engines": { + "node": ">= 14" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": 
"https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, + "node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==", + "license": "MIT" + }, + "node_modules/internal-slot": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz", + "integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.0", + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": 
"sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arguments": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", + "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-async-function": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", + "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-boolean-object": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bun-module": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-bun-module/-/is-bun-module-1.2.1.tgz", + "integrity": "sha512-AmidtEM6D6NmUiLOvvU7+IePxjEjOzra2h0pSrsfSAcXwl/83zLLXDByafUJy9k/rKK0pvXMLdwKwGHlX2Ke6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.6.3" + } + }, + 
"node_modules/is-bun-module/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.15.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz", + "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz", + "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + 
}, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz", + "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": 
"https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": 
"https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", + "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": 
"sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-regex": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz", + "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-string": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": 
"sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz", + "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", + "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/is-weakset": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.3.tgz", + "integrity": "sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.6.3", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/iterator.prototype": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.2.tgz", + "integrity": "sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "define-properties": "^1.2.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.4", + "set-function-name": "^2.0.1" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": 
"sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-circus/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + 
"ts-node": { + "optional": true + } + } + }, + "node_modules/jest-config/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-config/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/jest-config/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-config/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jest-config/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-config/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-diff/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-diff/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", 
+ "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-diff/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-each/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": 
"sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-environment-jsdom": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz", + "integrity": "sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/jsdom": "^20.0.0", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0", + "jsdom": "^20.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", 
+ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-leak-detector/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-leak-detector/node_modules/pretty-format": 
{ + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-leak-detector/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-matcher-utils/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-message-util/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": 
"^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": 
"^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": 
"sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/jest-runtime/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-runtime/node_modules/minimatch": { + "version": "3.1.2", + "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-snapshot/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": 
"sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { 
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-validate/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.6", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", + "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": 
"sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsdom": { + "version": "20.0.3", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-20.0.3.tgz", + "integrity": "sha512-SYhBvTh89tTfCD/CRdSOm13mOBa42iTaTyfyEWBdKcGdPxPtLFBXuHR8XHb33YNYaP+lLbmSvBTsnoesCNJEsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "abab": "^2.0.6", + "acorn": "^8.8.1", + "acorn-globals": "^7.0.0", + "cssom": "^0.5.0", + "cssstyle": "^2.3.0", + "data-urls": "^3.0.2", + "decimal.js": "^10.4.2", + "domexception": "^4.0.0", + "escodegen": "^2.0.0", + "form-data": "^4.0.0", + "html-encoding-sniffer": "^3.0.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.1", + "is-potential-custom-element-name": "^1.0.1", + "nwsapi": "^2.2.2", + "parse5": "^7.1.1", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^4.1.2", + "w3c-xmlserializer": "^4.0.0", + "webidl-conversions": "^7.0.0", + "whatwg-encoding": "^2.0.0", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0", + "ws": "^8.11.0", + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsdom/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/jsdom/node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + 
"integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/jsdom/node_modules/tr46": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", + "integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/jsdom/node_modules/webidl-conversions": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/jsdom/node_modules/whatwg-url": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz", + "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^3.0.0", + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-bigint": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", + "integrity": 
"sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", + "license": "MIT", + "dependencies": { + "bignumber.js": "^9.0.0" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonparse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.0.0.tgz", + "integrity": 
"sha512-X8/pl4Q98FceotM1YzNYRcdRpYKgKB6nOKW9oNZk4qssFGfSxA6tUyPgEYsZA+MAQ8nR6nBZwE2GKpeeOIeN3A==", + "engines": [ + "node >= 0.2.0" + ], + "license": "MIT" + }, + "node_modules/jsonstream": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/jsonstream/-/jsonstream-1.0.3.tgz", + "integrity": "sha512-yMDWHPGzQRwdiXy6itGgemEviwAoDp5i78y7rdzUa9RVe01vLJku+byVtqxwwSvp1y9LYHWYd+8MkYvbeilCug==", + "deprecated": "use JSONStream instead", + "dependencies": { + "jsonparse": "~1.0.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "jsonstream": "index.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jsonstream-next": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/jsonstream-next/-/jsonstream-next-3.0.0.tgz", + "integrity": "sha512-aAi6oPhdt7BKyQn1SrIIGZBt0ukKuOUE1qV6kJ3GgioSOYzsRc8z9Hfr1BVmacA/jLe9nARfmgMGgn68BqIAgg==", + "license": "(MIT OR Apache-2.0)", + "dependencies": { + "jsonparse": "^1.2.0", + "through2": "^4.0.2" + }, + "bin": { + "jsonstream-next": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jsonstream-next/node_modules/jsonparse": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==", + "engines": [ + "node >= 0.2.0" + ], + "license": "MIT" + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/jwa": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", + "integrity": 
"sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", + "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.0", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/known-css-properties": { + "version": "0.34.0", + "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.34.0.tgz", + "integrity": "sha512-tBECoUqNFbyAY4RrbqsBQqDFpGXAEbdD5QKr8kACx3+rnArmuuR22nKQWKazvp07N9yjTyDZaw/20UIH8tL9DQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/language-subtag-registry": { + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + 
"integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/language-tags": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", + "dev": true, + "license": "MIT", + "dependencies": { + "language-subtag-registry": "^0.3.20" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": 
"sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.castarray": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.castarray/-/lodash.castarray-4.4.0.tgz", + "integrity": "sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.truncate": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", + "integrity": "sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==", + "dev": true, + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + 
"type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "license": "ISC" + }, + "node_modules/lucide-react": { + "version": "0.439.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.439.0.tgz", + "integrity": "sha512-PafSWvDTpxdtNEndS2HIHxcNAbd54OaqSYJO90/b63rab2HWYqDbH194j0i82ZFdWOAcf0AHinRykXRRK2PJbw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" + } + }, + "node_modules/lz-string": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", + "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "lz-string": "bin/bin.js" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + 
"integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mathml-tag-names": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", + "integrity": "sha512-APMBEanjybaPzUrfqU0IMU5I0AswKMH7k8OTLs0vvV4KZpExkTkY87nR/zpbuTPj+gARop7aGUbl11pnDfW6xg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": 
"^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", + "integrity": "sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", + "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "license": "MIT", + 
"dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/meow": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-13.2.0.tgz", + "integrity": "sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": 
"sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromark": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", + "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz", + "integrity": "sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + 
"micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", + "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", + "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", + "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", + "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", + "integrity": "sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", + "integrity": "sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", + "integrity": 
"sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", + "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", + "integrity": "sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz", + "integrity": 
"sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", + "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", + "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", + "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz", + "integrity": "sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + 
} + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": 
"1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/next": { + "version": "14.2.9", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.9.tgz", + "integrity": "sha512-3CzBNo6BuJnRjcQvRw+irnU1WiuJNZEp+dkzkt91y4jeIDN/Emg95F+takSYiLpJ/HkxClVQRyqiTwYce5IVqw==", + "license": "MIT", + "dependencies": { + "@next/env": "14.2.9", + "@swc/helpers": "0.5.5", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", + "postcss": "8.4.31", + "styled-jsx": "5.1.1" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=18.17.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "14.2.9", + "@next/swc-darwin-x64": "14.2.9", + "@next/swc-linux-arm64-gnu": "14.2.9", + "@next/swc-linux-arm64-musl": "14.2.9", + "@next/swc-linux-x64-gnu": 
"14.2.9", + "@next/swc-linux-x64-musl": "14.2.9", + "@next/swc-win32-arm64-msvc": "14.2.9", + "@next/swc-win32-ia32-msvc": "14.2.9", + "@next/swc-win32-x64-msvc": "14.2.9" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/node-releases": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nwsapi": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.12.tgz", + "integrity": "sha512-qXDmcVlZV4XRtKFzddidpfVP4oMSGhga+xdMc25mv8kaLUHtgzCDhUxkrN8exkGdTlLNaXj7CV3GtON7zuGZ+w==", + "dev": true, + "license": "MIT" + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.2", + "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-is": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.6.tgz", + "integrity": "sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz", + "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + 
"define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.values": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.0.tgz", + "integrity": "sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + 
}, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz", + "integrity": "sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==", + "license": "BlueOak-1.0.0" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": 
"sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + 
"engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + 
"license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.4.45", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz", + "integrity": "sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.1", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": 
"sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", + "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.0.0", + "yaml": "^2.3.4" + }, + "engines": { + "node": ">= 14" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-load-config/node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": 
"https://github.com/sponsors/antonk52" + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-resolve-nested-selector": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/postcss-resolve-nested-selector/-/postcss-resolve-nested-selector-0.1.6.tgz", + "integrity": "sha512-0sglIs9Wmkzbr8lQwEyIzlDOOC9bGmfVKcJTaxv3vMmd3uo4o4DerC3En0bnmgceeql9BfC8hRkp7cg0fjdVqw==", + "dev": true, + "license": "MIT" + }, + "node_modules/postcss-safe-parser": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-7.0.0.tgz", + "integrity": "sha512-ovehqRNVCpuFzbXoTb4qLtyzK3xn3t/CUBxOs8LsnQjQrShaB4lKiHoVqY8ANaC0hBMHq5QVWk77rwGklFUDrg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss-safe-parser" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "license": "MIT", + 
"dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", + "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-linter-helpers": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz", + "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-diff": "^1.1.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + 
}, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", + "dev": true, + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "license": "MIT" + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { 
+ "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/react-markdown": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-9.0.1.tgz", + "integrity": "sha512-186Gw/vF1uRkydbsOIkcGXw7aHq0sZOCRFFjGrr7b9+nVZg4UfA4enXCaxm4fUzecU38sWfrNDitGhshuU7rdg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + 
"@types/react": ">=18", + "react": ">=18" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.5.7", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz", + "integrity": "sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.4", + "react-style-singleton": "^2.2.1", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.0", + "use-sidecar": "^1.1.2" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz", + "integrity": "sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==", + "license": "MIT", + "dependencies": { + "react-style-singleton": "^2.2.1", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", + "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "license": "MIT", + "dependencies": { + "get-nonce": "^1.0.0", + "invariant": "^2.2.4", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + 
"optional": true + } + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.6.tgz", + "integrity": "sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.1", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + 
"globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", + "dev": true, + "license": "MIT" + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", + "integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "set-function-name": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", + "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + 
"micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.0.tgz", + "integrity": "sha512-z3tJrAs2kIs1AqIIy6pzHmAHlF1hWQ+OdY4/hv+Wxe35EhyLKcajL33iUEn3ScxtFox9nUvRufR/Zre8Q08H/g==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", + "integrity": 
"sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": 
"https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz", + "integrity": "sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": 
"https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-regex-test": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz", + "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-regex": "^1.1.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": 
"https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", 
+ "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, + "node_modules/slice-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/stop-iteration-iterator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", + "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "internal-slot": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-length/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-length/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + 
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string.prototype.includes": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.0.tgz", + "integrity": "sha512-E34CkBgyeqNDcrbU76cDjL5JLcVrtSdYq0MEh/B10r17pRP4ciHLwTgnuLV8Ay6cgEMLkcBkFCKyFZ43YldYzg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.11.tgz", + "integrity": "sha512-NUdh0aDavY2og7IbBPenWqR9exH+E26Sv8e0/eTe1tltDGZL+GtBkDAnnyBtmekfK6/Dq3MkcGtzXFEd1LQrtg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "regexp.prototype.flags": "^1.5.2", + "set-function-name": "^2.0.2", + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">= 
0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.repeat": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz", + "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz", + "integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz", + "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-object": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz", + "integrity": "sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": 
{ + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/stylelint": { + "version": "16.9.0", + "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-16.9.0.tgz", + "integrity": "sha512-31Nm3WjxGOBGpQqF43o3wO9L5AC36TPIe6030Lnm13H3vDMTcS21DrLh69bMX+DBilKqMMVLian4iG6ybBoNRQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/stylelint" + }, + { + "type": "github", + "url": "https://github.com/sponsors/stylelint" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.1", + "@csstools/css-tokenizer": "^3.0.1", + "@csstools/media-query-list-parser": "^3.0.1", + "@csstools/selector-specificity": "^4.0.0", + "@dual-bundle/import-meta-resolve": "^4.1.0", + "balanced-match": "^2.0.0", + "colord": "^2.9.3", + "cosmiconfig": "^9.0.0", + "css-functions-list": "^3.2.2", + "css-tree": "^2.3.1", + "debug": "^4.3.6", + "fast-glob": "^3.3.2", + "fastest-levenshtein": "^1.0.16", + "file-entry-cache": "^9.0.0", + "global-modules": "^2.0.0", + "globby": "^11.1.0", + "globjoin": "^0.1.4", + "html-tags": "^3.3.1", + "ignore": "^5.3.2", + "imurmurhash": "^0.1.4", + "is-plain-object": "^5.0.0", + "known-css-properties": "^0.34.0", + "mathml-tag-names": "^2.1.3", + "meow": "^13.2.0", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "picocolors": "^1.0.1", + "postcss": "^8.4.41", + "postcss-resolve-nested-selector": "^0.1.6", + "postcss-safe-parser": "^7.0.0", + "postcss-selector-parser": "^6.1.2", + "postcss-value-parser": "^4.2.0", + "resolve-from": "^5.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^7.1.0", + "supports-hyperlinks": "^3.1.0", + "svg-tags": "^1.0.0", + "table": "^6.8.2", + "write-file-atomic": "^5.0.1" + }, + "bin": { + "stylelint": "bin/stylelint.mjs" + }, + "engines": { + "node": ">=18.12.0" + } + }, + "node_modules/stylelint-config-recommended": { + "version": "14.0.1", + "resolved": 
"https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-14.0.1.tgz", + "integrity": "sha512-bLvc1WOz/14aPImu/cufKAZYfXs/A/owZfSMZ4N+16WGXLoX5lOir53M6odBxvhgmgdxCVnNySJmZKx73T93cg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/stylelint" + }, + { + "type": "github", + "url": "https://github.com/sponsors/stylelint" + } + ], + "license": "MIT", + "engines": { + "node": ">=18.12.0" + }, + "peerDependencies": { + "stylelint": "^16.1.0" + } + }, + "node_modules/stylelint-config-standard": { + "version": "36.0.1", + "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-36.0.1.tgz", + "integrity": "sha512-8aX8mTzJ6cuO8mmD5yon61CWuIM4UD8Q5aBcWKGSf6kg+EC3uhB+iOywpTK4ca6ZL7B49en8yanOFtUW0qNzyw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/stylelint" + }, + { + "type": "github", + "url": "https://github.com/sponsors/stylelint" + } + ], + "license": "MIT", + "dependencies": { + "stylelint-config-recommended": "^14.0.1" + }, + "engines": { + "node": ">=18.12.0" + }, + "peerDependencies": { + "stylelint": "^16.1.0" + } + }, + "node_modules/stylelint-config-tailwindcss": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/stylelint-config-tailwindcss/-/stylelint-config-tailwindcss-0.0.7.tgz", + "integrity": "sha512-n2dCWH+0ppr0/by4EYCLWW7g5LU+l4UzUIsYS7xbVHqvm9UWa7UhltNdNiz5NmLF/FmbJR4Yd/v9DuUGvLw1Tg==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "stylelint": ">=13.13.1", + "tailwindcss": ">=2.2.16" + } + }, + "node_modules/stylelint/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + 
"node_modules/stylelint/node_modules/balanced-match": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-2.0.0.tgz", + "integrity": "sha512-1ugUSr8BHXRnK23KfuYS+gVMC3LB8QGH9W1iGtDPsNWoQbgtXSExkBu2aDR4epiGWZOjZsj6lDl/N/AqqTC3UA==", + "dev": true, + "license": "MIT" + }, + "node_modules/stylelint/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/stylelint/node_modules/file-entry-cache": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-9.1.0.tgz", + "integrity": "sha512-/pqPFG+FdxWQj+/WSuzXSDaNzxgTLr/OrR1QuqfEZzDakpdYE70PwUxL7BPUa8hpjbvY1+qvCl8k+8Tq34xJgg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/stylelint/node_modules/flat-cache": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-5.0.0.tgz", + "integrity": "sha512-JrqFmyUl2PnPi1OvLyTVHnQvwQ0S+e6lGSwu8OkAZlSaNIZciTY2H/cOOROxsBA1m/LZNHDsqAgDZt6akWcjsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.3.1", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/stylelint/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/string-width/node_modules/strip-ansi": { + 
"version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/stylelint/node_modules/write-file-atomic": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-5.0.1.tgz", + "integrity": "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-3.1.0.tgz", + "integrity": 
"sha512-2rn0BZ+/f7puLOHZm1HOJfwBggfaHXUpPUSSG/SWM4TWp5KCfmNYwnC3hruy2rZlMnmWZ+QAGpZfchu3f3695A==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-tags": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", + "integrity": "sha512-ovssysQTa+luh7A5Weu3Rta6FJlFBBbInjOh722LIt6klpU2/HtdUbszju/G4devcvk8PGt7FCLv5wftu3THUA==", + "dev": true + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/synckit": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.9.1.tgz", + "integrity": "sha512-7gr8p9TQP6RAHusBOSLs46F4564ZrjV8xFmw5zCmgmhGUcw2hxsShhJ6CEiHQMgPDwAQ1fWHPM0ypc4RMAig4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@pkgr/core": "^0.1.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, + "node_modules/table": { + "version": "6.8.2", + "resolved": "https://registry.npmjs.org/table/-/table-6.8.2.tgz", + "integrity": 
"sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "ajv": "^8.0.1", + "lodash.truncate": "^4.4.2", + "slice-ansi": "^4.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/table/node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/table/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/table/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/table/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT" + }, + "node_modules/table/node_modules/string-width": { + "version": "4.2.3", + "resolved": 
"https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/table/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tailwind-merge": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.5.2.tgz", + "integrity": "sha512-kjEBm+pvD+6eAwzJL2Bi+02/9LFLal1Gs61+QB7HvTfQQ0aXwC5LGT8PEt1gS0CWKktKe6ysPTAy3cBC5MeiIg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.11", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.11.tgz", + "integrity": "sha512-qhEuBcLemjSJk5ajccN9xJFtM/h0AVCPaA6C92jNP+M2J8kX+eMJHI7R2HFKUvvAsMpcfLILMCFYSeDwpMmlUg==", + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.5.3", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.0", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.0", + "lilconfig": "^2.1.0", + "micromatch": "^4.0.5", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.0.0", + "postcss": "^8.4.23", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.1", + "postcss-nested": "^6.0.1", + "postcss-selector-parser": "^6.0.11", + "resolve": "^1.22.2", + "sucrase": 
"^3.32.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tailwindcss-animate": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/tailwindcss-animate/-/tailwindcss-animate-1.0.7.tgz", + "integrity": "sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==", + "license": "MIT", + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": 
"ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "license": "MIT" + }, + "node_modules/through2": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/through2/-/through2-4.0.2.tgz", + "integrity": "sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==", + "license": "MIT", + "dependencies": { + "readable-stream": "3" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", + "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/trim-lines": { + "version": "3.0.1", + 
"resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-api-utils": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", + "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "license": "Apache-2.0" + }, + "node_modules/tsconfig-paths": { + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/tsconfig-paths/node_modules/json5": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": 
"sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/tsconfig-paths/node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/tslib": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz", + "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.2", + 
"resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", + "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz", + "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", + "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz", + "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", 
+ "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz", + "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unbox-primitive": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", + "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/undici-types": { + "version": "6.19.8", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", + "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", + "dev": true, + "license": "MIT" + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": 
"sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", + "integrity": "sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", + "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "license": "MIT", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": 
"https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/w3c-xmlserializer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz", + "integrity": "sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "license": "Apache-2.0", + 
"dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-mimetype": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz", + "integrity": "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", + "integrity": 
"sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.4.tgz", + "integrity": "sha512-bppkmBSsHFmIMSl8BO9TbsyzsvGjVoppt8xUiGzwiu/bhDCGxnpOKCxgqj6GuyHE0mINMDecBFPlOm2hzY084w==", + "dev": true, + "license": "MIT", + "dependencies": { + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.15" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz", + "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + 
"node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/write-file-atomic/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", + "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yaml": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.5.1.tgz", + "integrity": "sha512-bLQOjaX/ADgQ20isPJRvF0iRUHIxVhYvr53Of7wGcWlO2jvtUlH5m87DsmulFVxRpNLOnI4tB6p/oh8D7kpn9Q==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + 
"node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/search/grounded-generation-playground/package.json 
b/search/grounded-generation-playground/package.json new file mode 100644 index 0000000000..98bb7ca808 --- /dev/null +++ b/search/grounded-generation-playground/package.json @@ -0,0 +1,69 @@ +{ + "name": "gg_demo_nextjs", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev", + "build": "next build", + "lint": "next lint", + "gcp-build": "next build", + "test": "jest", + "test:watch": "jest --watch", + "format": "prettier --check './**/*.{js,jsx,ts,tsx,css,md,json}' --config ./.prettierrc", + "format:fix": "prettier --write './**/*.{js,jsx,ts,tsx,css,md,json}' --config ./.prettierrc", + "start": "next start -p 8080" + }, + "dependencies": { + "@google-cloud/vertexai": "^1.7.0", + "@radix-ui/react-accordion": "^1.2.1", + "@radix-ui/react-avatar": "^1.1.0", + "@radix-ui/react-icons": "^1.3.0", + "@radix-ui/react-label": "^2.1.0", + "@radix-ui/react-scroll-area": "^1.1.0", + "@radix-ui/react-select": "^2.1.1", + "@radix-ui/react-slider": "^1.2.0", + "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-switch": "^1.1.0", + "@radix-ui/react-tabs": "^1.1.0", + "@radix-ui/react-tooltip": "^1.1.2", + "@types/react-helmet": "^6.1.11", + "class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", + "gaxios": "^6.7.1", + "google-auth-library": "^9.14.1", + "jsonstream": "^1.0.3", + "jsonstream-next": "^3.0.0", + "lucide-react": "^0.439.0", + "next": "14.2.9", + "react": "^18", + "react-dom": "^18", + "react-markdown": "^9.0.1", + "remark-gfm": "^4.0.0", + "tailwind-merge": "^2.5.2", + "tailwindcss-animate": "^1.0.7" + }, + "devDependencies": { + "@tailwindcss/typography": "^0.5.15", + "@testing-library/jest-dom": "^6.5.0", + "@testing-library/react": "^16.0.1", + "@types/node": "^20.16.5", + "@types/node-fetch": "^2.6.11", + "@types/react": "^18", + "@types/react-dom": "^18", + "@typescript-eslint/eslint-plugin": "^8.5.0", + "@typescript-eslint/parser": "^8.5.0", + "eslint": "^8.0.0", + "eslint-config-next": "^14.2.14", + "eslint-config-prettier": 
"^9.1.0", + "eslint-plugin-prettier": "^5.2.1", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0", + "postcss": "^8", + "prettier": "^3.3.3", + "stylelint": "^16.9.0", + "stylelint-config-standard": "^36.0.1", + "stylelint-config-tailwindcss": "^0.0.7", + "tailwindcss": "^3.4.1", + "typescript": "^5" + } +} diff --git a/search/grounded-generation-playground/postcss.config.mjs b/search/grounded-generation-playground/postcss.config.mjs new file mode 100644 index 0000000000..da0cd989df --- /dev/null +++ b/search/grounded-generation-playground/postcss.config.mjs @@ -0,0 +1,24 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** @type {import('postcss-load-config').Config} */ +const config = { + plugins: { + tailwindcss: {}, + }, +}; + +export default config; diff --git a/search/grounded-generation-playground/src/app/api/chat/route.ts b/search/grounded-generation-playground/src/app/api/chat/route.ts new file mode 100644 index 0000000000..db687018e7 --- /dev/null +++ b/search/grounded-generation-playground/src/app/api/chat/route.ts @@ -0,0 +1,134 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Grounded generation for chat + * + */ +import { NextRequest, NextResponse } from 'next/server'; +import { GoogleAuth } from 'google-auth-library'; +import { + responseCandidateToResult, + iteratorToStream, + processApiResponse, + mapOptionsToGroundedGenerationRequest, +} from '@/lib/apiutils'; + +const PROJECT_NUMBER = process.env.PROJECT_NUMBER; +const API_ENDPOINT = `https://discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/global:streamGenerateGroundedContent`; + +export async function POST(req: NextRequest) { + console.log('Received POST request to /api/chat'); + + const { + messages, + model, + googleGrounding, + vertexGrounding, + vertexConfigId, + temperature, + retrievalThreshold, + } = await req.json(); + if (!messages || messages.length === 0) { + throw new Error('No messages provided'); + } + const lastMessage = messages[messages.length - 1]; + console.log('Request body (excluding session):', { + lastMessage, + model, + googleGrounding, + vertexGrounding, + vertexConfigId, + retrievalThreshold, + temperature, + }); + + // Use Google Auth Library to get the access token + const auth = new GoogleAuth({ + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }); + const client = await auth.getClient(); + const accessToken = await client.getAccessToken(); + + if (!accessToken.token) { + throw new Error('Failed to obtain access token'); + } + + // Map the session history to contents parts + const contents = messages + .filter((message: { role: string; content: string }) => { + return 
message.content.trim() !== '' && message.role.trim() !== ''; + }) + .map((message: { role: string; content: string }) => ({ + role: message.role, + parts: [{ text: message.content }], + })); + const systemInstruction = { + parts: { + text: 'You are a helpful AI assistant. Answers should be somewhere around 3 scentences long. Prefer citations from facts.', + }, + }; + const requestBody = mapOptionsToGroundedGenerationRequest({ + systemInstruction, + contents, + model, + googleGrounding, + vertexGrounding, + vertexConfigId, + }); + + console.log('Sending request to Discovery Engine API'); + console.log('Request body:', JSON.stringify([requestBody])); + + try { + const response = await fetch(API_ENDPOINT, { + method: 'POST', + headers: { + Authorization: `Bearer ${accessToken.token}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify([requestBody]), + }); + + console.log('Received response from Discovery Engine API'); + console.log('Response status:', response.status); + console.log('Response headers:', response.headers); + + if (!response.ok) { + const errorText = await response.text(); + console.error('Error response from API:', errorText); + throw new Error(`API request failed with status ${response.status}: ${errorText}`); + } + + const iterator = processApiResponse(response); + const stream = iteratorToStream(iterator); + + return new NextResponse(stream, { + headers: { + 'Content-Type': 'text/plain', + 'Transfer-Encoding': 'chunked', + 'Cache-Control': 'no-cache, no-transform', + 'X-Accel-Buffering': 'no', + }, + }); + } catch (error) { + console.error('Error in POST handler:', error); + return new NextResponse(JSON.stringify({ error: 'Internal Server Error' }), { + status: 500, + headers: { 'Content-Type': 'application/json' }, + }); + } +} diff --git a/search/grounded-generation-playground/src/app/api/grounded/route.ts b/search/grounded-generation-playground/src/app/api/grounded/route.ts new file mode 100644 index 0000000000..085604274e --- 
/dev/null +++ b/search/grounded-generation-playground/src/app/api/grounded/route.ts @@ -0,0 +1,121 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Grounded generation for side by side + * + */ +import { NextRequest, NextResponse } from 'next/server'; +import { GoogleAuth } from 'google-auth-library'; +import { + responseCandidateToResult, + iteratorToStream, + processApiResponse, + mapOptionsToGroundedGenerationRequest, +} from '@/lib/apiutils'; + +const PROJECT_NUMBER = process.env.PROJECT_NUMBER; +const API_ENDPOINT = `https://discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/global:streamGenerateGroundedContent`; + +export async function POST(req: NextRequest) { + console.log('Received POST request to /api/grounded'); + + const { query, model, googleGrounding, vertexGrounding, vertexConfigId, temperature } = + await req.json(); + if (!query || query.length === 0) { + throw new Error('No query provided'); + } + console.log('Request body:', { + query, + model, + googleGrounding, + vertexGrounding, + vertexConfigId, + }); + + // Use Google Auth Library to get the access token + const auth = new GoogleAuth({ + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }); + const client = await auth.getClient(); + const accessToken = await client.getAccessToken(); + + if (!accessToken.token) { + throw new Error('Failed to obtain access token'); + } + // Single turn content, 
just a user query. + const contents = [ + { + role: 'user', + parts: [{ text: query }], + }, + ]; + const systemInstruction = { + parts: { + text: 'You are a helpful AI assistant using grounding sources.', + }, + }; + const requestBody = mapOptionsToGroundedGenerationRequest({ + systemInstruction, + contents, + model, + googleGrounding, + vertexGrounding, + vertexConfigId, + }); + + console.log('Sending request to Discovery Engine API'); + console.log('Request body:', JSON.stringify([requestBody])); + + try { + const response = await fetch(API_ENDPOINT, { + method: 'POST', + headers: { + Authorization: `Bearer ${accessToken.token}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify([requestBody]), + }); + + console.log('Received response from Discovery Engine API'); + console.log('Response status:', response.status); + console.log('Response headers:', response.headers); + + if (!response.ok) { + const errorText = await response.text(); + console.error('Error response from API:', errorText); + throw new Error(`API request failed with status ${response.status}: ${errorText}`); + } + + const iterator = processApiResponse(response); + const stream = iteratorToStream(iterator); + + return new NextResponse(stream, { + headers: { + 'Content-Type': 'text/plain', + 'Transfer-Encoding': 'chunked', + 'Cache-Control': 'no-cache, no-transform', + 'X-Accel-Buffering': 'no', + }, + }); + } catch (error) { + console.error('Error in POST handler:', error); + return new NextResponse(JSON.stringify({ error: 'Internal Server Error' }), { + status: 500, + headers: { 'Content-Type': 'application/json' }, + }); + } +} diff --git a/search/grounded-generation-playground/src/app/api/ungrounded/route.ts b/search/grounded-generation-playground/src/app/api/ungrounded/route.ts new file mode 100644 index 0000000000..3ef023ab61 --- /dev/null +++ b/search/grounded-generation-playground/src/app/api/ungrounded/route.ts @@ -0,0 +1,63 @@ +/** + * Copyright 2024 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Ungrounded generation for side by side generation + * + * + */ +import { NextRequest, NextResponse } from 'next/server'; +import { VertexAI } from '@google-cloud/vertexai'; + +const PROJECT_ID = process.env.PROJECT_ID; +const LOCATION = process.env.LOCATION; +const MODEL = 'gemini-1.5-flash-001'; + +export async function POST(req: NextRequest) { + const { query, model, googleGrounding, vertexGrounding } = await req.json(); + + const vertexAI = new VertexAI({ project: PROJECT_ID, location: LOCATION }); + const generativeModel = vertexAI.getGenerativeModel({ model: MODEL }); + + const request = { + contents: [ + { + role: 'user', + parts: [{ text: query }], + }, + ], + generationConfig: { + maxOutputTokens: 2048, + temperature: 0.9, + topP: 1, + topK: 1, + }, + }; + + const stream = new ReadableStream({ + async start(controller) { + console.log('Received POST request to /api/ungrounded'); + const result = await generativeModel.generateContentStream(request); + for await (const item of result.stream) { + const chunk = item.candidates?.[0]?.content?.parts?.[0]?.text ?? 
''; + controller.enqueue(new TextEncoder().encode(chunk)); + } + controller.close(); + }, + }); + + return new NextResponse(stream); +} diff --git a/search/grounded-generation-playground/src/app/favicon.ico b/search/grounded-generation-playground/src/app/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..718d6fea4835ec2d246af9800eddb7ffb276240c GIT binary patch literal 25931 zcmeHv30#a{`}aL_*G&7qml|y<+KVaDM2m#dVr!KsA!#An?kSQM(q<_dDNCpjEux83 zLb9Z^XxbDl(w>%i@8hT6>)&Gu{h#Oeyszu?xtw#Zb1mO{pgX9699l+Qppw7jXaYf~-84xW z)w4x8?=youko|}Vr~(D$UXIbiXABHh`p1?nn8Po~fxRJv}|0e(BPs|G`(TT%kKVJAdg5*Z|x0leQq0 zkdUBvb#>9F()jo|T~kx@OM8$9wzs~t2l;K=woNssA3l6|sx2r3+kdfVW@e^8e*E}v zA1y5{bRi+3Z`uD3{F7LgFJDdvm;nJilkzDku>BwXH(8ItVCXk*-lSJnR?-2UN%hJ){&rlvg`CDTj z)Bzo!3v7Ou#83zEDEFcKt(f1E0~=rqeEbTnMvWR#{+9pg%7G8y>u1OVRUSoox-ovF z2Ydma(;=YuBY(eI|04{hXzZD6_f(v~H;C~y5=DhAC{MMS>2fm~1H_t2$56pc$NH8( z5bH|<)71dV-_oCHIrzrT`2s-5w_+2CM0$95I6X8p^r!gHp+j_gd;9O<1~CEQQGS8) zS9Qh3#p&JM-G8rHekNmKVewU;pJRcTAog68KYo^dRo}(M>36U4Us zfgYWSiHZL3;lpWT=zNAW>Dh#mB!_@Lg%$ms8N-;aPqMn+C2HqZgz&9~Eu z4|Kp<`$q)Uw1R?y(~S>ePdonHxpV1#eSP1B;Ogo+-Pk}6#0GsZZ5!||ev2MGdh}_m z{DeR7?0-1^zVs&`AV6Vt;r3`I`OI_wgs*w=eO%_#7Kepl{B@xiyCANc(l zzIyd4y|c6PXWq9-|KM8(zIk8LPk(>a)zyFWjhT!$HJ$qX1vo@d25W<fvZQ2zUz5WRc(UnFMKHwe1| zWmlB1qdbiA(C0jmnV<}GfbKtmcu^2*P^O?MBLZKt|As~ge8&AAO~2K@zbXelK|4T<{|y4`raF{=72kC2Kn(L4YyenWgrPiv z@^mr$t{#X5VuIMeL!7Ab6_kG$&#&5p*Z{+?5U|TZ`B!7llpVmp@skYz&n^8QfPJzL z0G6K_OJM9x+Wu2gfN45phANGt{7=C>i34CV{Xqlx(fWpeAoj^N0Biu`w+MVcCUyU* zDZuzO0>4Z6fbu^T_arWW5n!E45vX8N=bxTVeFoep_G#VmNlQzAI_KTIc{6>c+04vr zx@W}zE5JNSU>!THJ{J=cqjz+4{L4A{Ob9$ZJ*S1?Ggg3klFp!+Y1@K+pK1DqI|_gq z5ZDXVpge8-cs!o|;K73#YXZ3AShj50wBvuq3NTOZ`M&qtjj#GOFfgExjg8Gn8>Vq5 z`85n+9|!iLCZF5$HJ$Iu($dm?8~-ofu}tEc+-pyke=3!im#6pk_Wo8IA|fJwD&~~F zc16osQ)EBo58U7XDuMexaPRjU@h8tXe%S{fA0NH3vGJFhuyyO!Uyl2^&EOpX{9As0 zWj+P>{@}jxH)8|r;2HdupP!vie{sJ28b&bo!8`D^x}TE$%zXNb^X1p@0PJ86`dZyj z%ce7*{^oo+6%&~I!8hQy-vQ7E)0t0ybH4l%KltWOo~8cO`T=157JqL(oq_rC%ea&4 
z2NcTJe-HgFjNg-gZ$6!Y`SMHrlj}Etf7?r!zQTPPSv}{so2e>Fjs1{gzk~LGeesX%r(Lh6rbhSo_n)@@G-FTQy93;l#E)hgP@d_SGvyCp0~o(Y;Ee8{ zdVUDbHm5`2taPUOY^MAGOw*>=s7=Gst=D+p+2yON!0%Hk` zz5mAhyT4lS*T3LS^WSxUy86q&GnoHxzQ6vm8)VS}_zuqG?+3td68_x;etQAdu@sc6 zQJ&5|4(I?~3d-QOAODHpZ=hlSg(lBZ!JZWCtHHSj`0Wh93-Uk)_S%zsJ~aD>{`A0~ z9{AG(e|q3g5B%wYKRxiL2Y$8(4w6bzchKuloQW#e&S3n+P- z8!ds-%f;TJ1>)v)##>gd{PdS2Oc3VaR`fr=`O8QIO(6(N!A?pr5C#6fc~Ge@N%Vvu zaoAX2&(a6eWy_q&UwOhU)|P3J0Qc%OdhzW=F4D|pt0E4osw;%<%Dn58hAWD^XnZD= z>9~H(3bmLtxpF?a7su6J7M*x1By7YSUbxGi)Ot0P77`}P3{)&5Un{KD?`-e?r21!4vTTnN(4Y6Lin?UkSM z`MXCTC1@4A4~mvz%Rh2&EwY))LeoT=*`tMoqcEXI>TZU9WTP#l?uFv+@Dn~b(>xh2 z;>B?;Tz2SR&KVb>vGiBSB`@U7VIWFSo=LDSb9F{GF^DbmWAfpms8Sx9OX4CnBJca3 zlj9(x!dIjN?OG1X4l*imJNvRCk}F%!?SOfiOq5y^mZW)jFL@a|r-@d#f7 z2gmU8L3IZq0ynIws=}~m^#@&C%J6QFo~Mo4V`>v7MI-_!EBMMtb%_M&kvAaN)@ZVw z+`toz&WG#HkWDjnZE!6nk{e-oFdL^$YnbOCN}JC&{$#$O27@|Tn-skXr)2ml2~O!5 zX+gYoxhoc7qoU?C^3~&!U?kRFtnSEecWuH0B0OvLodgUAi}8p1 zrO6RSXHH}DMc$&|?D004DiOVMHV8kXCP@7NKB zgaZq^^O<7PoKEp72kby@W0Z!Y*Ay{&vfg#C&gG@YVR9g?FEocMUi1gSN$+V+ayF45{a zuDZDTN}mS|;BO%gEf}pjBfN2-gIrU#G5~cucA;dokXW89%>AyXJJI z9X4UlIWA|ZYHgbI z5?oFk@A=Ik7lrEQPDH!H+b`7_Y~aDb_qa=B2^Y&Ow41cU=4WDd40dp5(QS-WMN-=Y z9g;6_-JdNU;|6cPwf$ak*aJIcwL@1n$#l~zi{c{EW?T;DaW*E8DYq?Umtz{nJ&w-M zEMyTDrC&9K$d|kZe2#ws6)L=7K+{ zQw{XnV6UC$6-rW0emqm8wJoeZK)wJIcV?dST}Z;G0Arq{dVDu0&4kd%N!3F1*;*pW zR&qUiFzK=@44#QGw7k1`3t_d8&*kBV->O##t|tonFc2YWrL7_eqg+=+k;!F-`^b8> z#KWCE8%u4k@EprxqiV$VmmtiWxDLgnGu$Vs<8rppV5EajBXL4nyyZM$SWVm!wnCj-B!Wjqj5-5dNXukI2$$|Bu3Lrw}z65Lc=1G z^-#WuQOj$hwNGG?*CM_TO8Bg-1+qc>J7k5c51U8g?ZU5n?HYor;~JIjoWH-G>AoUP ztrWWLbRNqIjW#RT*WqZgPJXU7C)VaW5}MiijYbABmzoru6EmQ*N8cVK7a3|aOB#O& zBl8JY2WKfmj;h#Q!pN%9o@VNLv{OUL?rixHwOZuvX7{IJ{(EdPpuVFoQqIOa7giLVkBOKL@^smUA!tZ1CKRK}#SSM)iQHk)*R~?M!qkCruaS!#oIL1c z?J;U~&FfH#*98^G?i}pA{ z9Jg36t4=%6mhY(quYq*vSxptes9qy|7xSlH?G=S@>u>Ebe;|LVhs~@+06N<4CViBk zUiY$thvX;>Tby6z9Y1edAMQaiH zm^r3v#$Q#2T=X>bsY#D%s!bhs^M9PMAcHbCc0FMHV{u-dwlL;a1eJ63v5U*?Q_8JO 
zT#50!RD619#j_Uf))0ooADz~*9&lN!bBDRUgE>Vud-i5ck%vT=r^yD*^?Mp@Q^v+V zG#-?gKlr}Eeqifb{|So?HM&g91P8|av8hQoCmQXkd?7wIJwb z_^v8bbg`SAn{I*4bH$u(RZ6*xUhuA~hc=8czK8SHEKTzSxgbwi~9(OqJB&gwb^l4+m`k*Q;_?>Y-APi1{k zAHQ)P)G)f|AyjSgcCFps)Fh6Bca*Xznq36!pV6Az&m{O8$wGFD? zY&O*3*J0;_EqM#jh6^gMQKpXV?#1?>$ml1xvh8nSN>-?H=V;nJIwB07YX$e6vLxH( zqYwQ>qxwR(i4f)DLd)-$P>T-no_c!LsN@)8`e;W@)-Hj0>nJ-}Kla4-ZdPJzI&Mce zv)V_j;(3ERN3_@I$N<^|4Lf`B;8n+bX@bHbcZTopEmDI*Jfl)-pFDvo6svPRoo@(x z);_{lY<;);XzT`dBFpRmGrr}z5u1=pC^S-{ce6iXQlLGcItwJ^mZx{m$&DA_oEZ)B{_bYPq-HA zcH8WGoBG(aBU_j)vEy+_71T34@4dmSg!|M8Vf92Zj6WH7Q7t#OHQqWgFE3ARt+%!T z?oLovLVlnf?2c7pTc)~cc^($_8nyKwsN`RA-23ed3sdj(ys%pjjM+9JrctL;dy8a( z@en&CQmnV(()bu|Y%G1-4a(6x{aLytn$T-;(&{QIJB9vMox11U-1HpD@d(QkaJdEb zG{)+6Dos_L+O3NpWo^=gR?evp|CqEG?L&Ut#D*KLaRFOgOEK(Kq1@!EGcTfo+%A&I z=dLbB+d$u{sh?u)xP{PF8L%;YPPW53+@{>5W=Jt#wQpN;0_HYdw1{ksf_XhO4#2F= zyPx6Lx2<92L-;L5PD`zn6zwIH`Jk($?Qw({erA$^bC;q33hv!d!>%wRhj# zal^hk+WGNg;rJtb-EB(?czvOM=H7dl=vblBwAv>}%1@{}mnpUznfq1cE^sgsL0*4I zJ##!*B?=vI_OEVis5o+_IwMIRrpQyT_Sq~ZU%oY7c5JMIADzpD!Upz9h@iWg_>>~j zOLS;wp^i$-E?4<_cp?RiS%Rd?i;f*mOz=~(&3lo<=@(nR!_Rqiprh@weZlL!t#NCc zO!QTcInq|%#>OVgobj{~ixEUec`E25zJ~*DofsQdzIa@5^nOXj2T;8O`l--(QyU^$t?TGY^7#&FQ+2SS3B#qK*k3`ye?8jUYSajE5iBbJls75CCc(m3dk{t?- zopcER9{Z?TC)mk~gpi^kbbu>b-+a{m#8-y2^p$ka4n60w;Sc2}HMf<8JUvhCL0B&Btk)T`ctE$*qNW8L$`7!r^9T+>=<=2qaq-;ll2{`{Rg zc5a0ZUI$oG&j-qVOuKa=*v4aY#IsoM+1|c4Z)<}lEDvy;5huB@1RJPquU2U*U-;gu z=En2m+qjBzR#DEJDO`WU)hdd{Vj%^0V*KoyZ|5lzV87&g_j~NCjwv0uQVqXOb*QrQ zy|Qn`hxx(58c70$E;L(X0uZZ72M1!6oeg)(cdKO ze0gDaTz+ohR-#d)NbAH4x{I(21yjwvBQfmpLu$)|m{XolbgF!pmsqJ#D}(ylp6uC> z{bqtcI#hT#HW=wl7>p!38sKsJ`r8}lt-q%Keqy%u(xk=yiIJiUw6|5IvkS+#?JTBl z8H5(Q?l#wzazujH!8o>1xtn8#_w+397*_cy8!pQGP%K(Ga3pAjsaTbbXJlQF_+m+-UpUUent@xM zg%jqLUExj~o^vQ3Gl*>wh=_gOr2*|U64_iXb+-111aH}$TjeajM+I20xw(((>fej-@CIz4S1pi$(#}P7`4({6QS2CaQS4NPENDp>sAqD z$bH4KGzXGffkJ7R>V>)>tC)uax{UsN*dbeNC*v}#8Y#OWYwL4t$ePR?VTyIs!wea+ z5Urmc)X|^`MG~*dS6pGSbU+gPJoq*^a=_>$n4|P^w$sMBBy@f*Z^Jg6?n5?oId6f{ 
z$LW4M|4m502z0t7g<#Bx%X;9<=)smFolV&(V^(7Cv2-sxbxopQ!)*#ZRhTBpx1)Fc zNm1T%bONzv6@#|dz(w02AH8OXe>kQ#1FMCzO}2J_mST)+ExmBr9cva-@?;wnmWMOk z{3_~EX_xadgJGv&H@zK_8{(x84`}+c?oSBX*Ge3VdfTt&F}yCpFP?CpW+BE^cWY0^ zb&uBN!Ja3UzYHK-CTyA5=L zEMW{l3Usky#ly=7px648W31UNV@K)&Ub&zP1c7%)`{);I4b0Q<)B}3;NMG2JH=X$U zfIW4)4n9ZM`-yRj67I)YSLDK)qfUJ_ij}a#aZN~9EXrh8eZY2&=uY%2N0UFF7<~%M zsB8=erOWZ>Ct_#^tHZ|*q`H;A)5;ycw*IcmVxi8_0Xk}aJA^ath+E;xg!x+As(M#0=)3!NJR6H&9+zd#iP(m0PIW8$ z1Y^VX`>jm`W!=WpF*{ioM?C9`yOR>@0q=u7o>BP-eSHqCgMDj!2anwH?s%i2p+Q7D zzszIf5XJpE)IG4;d_(La-xenmF(tgAxK`Y4sQ}BSJEPs6N_U2vI{8=0C_F?@7<(G; zo$~G=8p+076G;`}>{MQ>t>7cm=zGtfbdDXm6||jUU|?X?CaE?(<6bKDYKeHlz}DA8 zXT={X=yp_R;HfJ9h%?eWvQ!dRgz&Su*JfNt!Wu>|XfU&68iRikRrHRW|ZxzRR^`eIGt zIeiDgVS>IeExKVRWW8-=A=yA`}`)ZkWBrZD`hpWIxBGkh&f#ijr449~m`j6{4jiJ*C!oVA8ZC?$1RM#K(_b zL9TW)kN*Y4%^-qPpMP7d4)o?Nk#>aoYHT(*g)qmRUb?**F@pnNiy6Fv9rEiUqD(^O zzyS?nBrX63BTRYduaG(0VVG2yJRe%o&rVrLjbxTaAFTd8s;<<@Qs>u(<193R8>}2_ zuwp{7;H2a*X7_jryzriZXMg?bTuegABb^87@SsKkr2)0Gyiax8KQWstw^v#ix45EVrcEhr>!NMhprl$InQMzjSFH54x5k9qHc`@9uKQzvL4ihcq{^B zPrVR=o_ic%Y>6&rMN)hTZsI7I<3&`#(nl+3y3ys9A~&^=4?PL&nd8)`OfG#n zwAMN$1&>K++c{^|7<4P=2y(B{jJsQ0a#U;HTo4ZmWZYvI{+s;Td{Yzem%0*k#)vjpB zia;J&>}ICate44SFYY3vEelqStQWFihx%^vQ@Do(sOy7yR2@WNv7Y9I^yL=nZr3mb zXKV5t@=?-Sk|b{XMhA7ZGB@2hqsx}4xwCW!in#C zI@}scZlr3-NFJ@NFaJlhyfcw{k^vvtGl`N9xSo**rDW4S}i zM9{fMPWo%4wYDG~BZ18BD+}h|GQKc-g^{++3MY>}W_uq7jGHx{mwE9fZiPCoxN$+7 zrODGGJrOkcPQUB(FD5aoS4g~7#6NR^ma7-!>mHuJfY5kTe6PpNNKC9GGRiu^L31uG z$7v`*JknQHsYB!Tm_W{a32TM099djW%5e+j0Ve_ct}IM>XLF1Ap+YvcrLV=|CKo6S zb+9Nl3_YdKP6%Cxy@6TxZ>;4&nTneadr z_ES90ydCev)LV!dN=#(*f}|ZORFdvkYBni^aLbUk>BajeWIOcmHP#8S)*2U~QKI%S zyrLmtPqb&TphJ;>yAxri#;{uyk`JJqODDw%(Z=2`1uc}br^V%>j!gS)D*q*f_-qf8&D;W1dJgQMlaH5er zN2U<%Smb7==vE}dDI8K7cKz!vs^73o9f>2sgiTzWcwY|BMYHH5%Vn7#kiw&eItCqa zIkR2~Q}>X=Ar8W|^Ms41Fm8o6IB2_j60eOeBB1Br!boW7JnoeX6Gs)?7rW0^5psc- zjS16yb>dFn>KPOF;imD}e!enuIniFzv}n$m2#gCCv4jM#ArwlzZ$7@9&XkFxZ4n!V zj3dyiwW4Ki2QG{@i>yuZXQizw_OkZI^-3otXC{!(lUpJF33gI60ak;Uqitp74|B6I 
zgg{b=Iz}WkhCGj1M=hu4#Aw173YxIVbISaoc z-nLZC*6Tgivd5V`K%GxhBsp@SUU60-rfc$=wb>zdJzXS&-5(NRRodFk;Kxk!S(O(a0e7oY=E( zAyS;Ow?6Q&XA+cnkCb{28_1N8H#?J!*$MmIwLq^*T_9-z^&UE@A(z9oGYtFy6EZef LrJugUA?W`A8`#=m literal 0 HcmV?d00001 diff --git a/search/grounded-generation-playground/src/app/fonts/GeistMonoVF.woff b/search/grounded-generation-playground/src/app/fonts/GeistMonoVF.woff new file mode 100644 index 0000000000000000000000000000000000000000..f2ae185cbfd16946a534d819e9eb03924abbcc49 GIT binary patch literal 67864 zcmZsCV{|6X^LDby#!fc2?QCp28{4*X$D569+qP}vj&0lKKhN*HAKy9W>N!=Xdb(?> zQB^(TCNCxi0tx~G0t$@@g8bk8lJvX$|6bxEqGBK*H_sp-KYBnwz$0Q}BT2;-%I=)X2ub{=04r2*}TK5D+LXt~5{t z)Bof^+#0@Rw7=mKi|m$bX6?Bh~_rVfN!~Z5D+lYZ~eMdYd=)1 z?To(VG`{%|MBi{mhZ2~!F#vq`Pec9x)g^>91o^TxurUDvvGDqSS9st3-kw(m@3Xga z`qtIzyIr_nARq+I@sH7;0MG(2NPTSa#jh!1f4cEF5Xll)bpZ(>cyI|Q1wleT1wA5Y zq9^hv^x;~(?2G$>(CTL2)#Ou-rP=XDW$spn8<%0TH%F=^X^(F62Vd@bY`Wi$j$33w zf!U^8o_B|x>{pW$eFZG}b7#|uFueKt$`e9j!wHNBGQX67&nfgl(Ae`3qE-E+yBSfA zEnJSA6p%}|+P9ZIYR{w}nfaKIlV@b3YYzcH!?WNXRvg|J( z((lq^WAE%Q7;oE?zDk~Nvg1Dr_0)KH8m&HF%^&8bI!=#YAGqIx$Yf2lH9S*;=c=b6 zUHi?R*$?Q;>HU4-#?hGJ&dj2jq>d3;_NN_TeipMG!(E+ou)RL-kMQv(W$b9+k# z*%bh8;4)9Je-Giu+XwdbyoaSGei^KG*(1D)5+h{Kfg<`v)nU>dj}RiD_+VvZgb7>9 z-Qb^cdc0k1VSIW!onbm2*_uY*_+r1qe${8^DzXxMnX@F#u>I3_n0j_0ih#p?wd+gPI5niQVbIIsk zkxy%JZZqLeb?p_DXdh1*9Z(O`Nm%TZ(zL`RA!dd+$VNO>qwecEt;dy5w%UK1@1exK zD~__{?4}pb@sGL5CjI=xAR7Jym_*l%fS~I(m>6873y~E7k;IfdA_0)|1$o9?h92Js zt4eu6$WMaSodkz#g|LB%Iw?^B?6x^A=arKjpBhhH6ZCbk2{;io5x)B3eh9R{KEOQX z9|&Q1T3-YGeF+9$doOBzU`TntM~LF~ON3aEZ|p9Y7+wF9qBi`6(hl}&)@-uZ`4zJl z>R`Cps(&x90dBZ~SLeCp?oa*PgM%P!bZaG*OS96bkBT*gF)q0a zxEd&4ZXnQHBuCrYm@m@ffPQTObP*2j+P z_?=gLxmGc32nceW5l5oy=+SB$=N%F^{g}lKR9(TljKIPHw)zVyZ?3ODUL^k;0CuW% z!;ErXcl6|m8OB+{5iYNEq}!Y@o<%r_^{5a($V)INcxkIcMA}Gd8LUShZK5U!u)=PR z6ZALS*{0F1Oxl?y$xE;JA+eyc6mW}LqFTZ3ZvVl#h*UFfj`$%JE0l8D!JRBYUlH!L zJ!uZs@&)nqNg9x8t`fZ?k4Ihgdv(Ogzr)|%{JQ|-g@#=7rCIq(Oo={zr!i7F_F!6; 
zqpKdMO={?6)e1SETQW+U?L?WPzQx9x#RrVu%xa5u$bDgLQrF-K4Iwd}9a=yS3(f1J z=&B1p=UwPU_#kfxrJ(YnDYZkc%{pp&sn{<~MdR_9^8y%u``RUJaJtY*yi=~R9ryu@ z9kzsKGwMLhZ1egl=e5m~k^Ft9pSfxI5B!$g1WaeqpO`4?C-3aj(gSm%1+@BdqpyAV z@X|;G-&|(jA;zG>T=$%}2gC%)gu@pTPQ)SpSw*2DuSrX((%PM=kQ&E@b=Ygy)l&#k zn6Q419734+(;{THjU2Uy9No0H4_jV1#6O)c>u@tbG6oWD;-8yHLnM^;;b@dWvle!?{40o`dO)$$EZ zM^@JN7b3@-+?UUO*P#gtLsy$!7gZcziDwAj59PsCAJm>m6r+l^X1z|%wu-jJhnQ&_ znPJwq9_*qBLoo*W`sPdYk10kPgf$aH@4qU~%&pFl2rZ0AHR*E-AvBR{F9QCehDa@z z95xXU{QZg|=zb2Pq36>@3je4inO+>S(`ht?)Z#zrHM(i>qE+>iU#!8v4QnWDruR08 zihT~ec3TRJh#llhgk(NqF04=VE8}61FWwvTi_}KWRnkIGbxQ)CAyBfBoVsTvRsR!v zeeHuptQ&5sDmg3vV_f9UtqYjdrR(_D^waATK``ZJjfZD5Kduvl1+l2-u6Qf=6Ombx z7Sq ztJ92oU^LD6n$?=8G?#FGx#fF$d!2WBTf$UGVa}#`S@X&5dFIq%K!1Ikjs!+ybc~8&;<*f2$gyb>j{=&y@=kHsC%Xl#WTojY!)xQxm z+xUe-8Of9gTp&DDOh{Yy9#6leUk5m&-h{G7M@bsLtAJZq1|X(5;ulY z-D2nY-`lAFFZza${swOYsV>&wyw;MiiXw9Ze4so}{Flt`IeJQ5b1l1!d)yG4v?WEO zO3yg9oy--%g}hya8*T);IAWhS&T>>KL9Je(WS#9P#!$_f6!1`7cfKj*+i>@*tP8Mjj|un5Z`YGD>MiCU!adPX zx#5sU8_)@)5fHgRLdp7k;l9Mr_8H3SOvpCBbBRGBQ`Wih*Xpj<)C6}E4SH?GeM1wt)HAM~N<~ejyt^Wpq0tmp z6X&e+wbKjOt@{1ng^s>(semrGFCQLXu|@O1tvtmYwuZ`$BSe{a-011Sk2a~(>MVE0 zpIQ7LpuG+o?lOHuw%e_kJ6yAoXCpu*QQeY%8SNh6?$89*3`>%=;EOJb+gtz&Kp|yv zfPV+nw`uTKbxE3vpT)v3C@L}V3(f*@_3N$Flc(8e<6F?hmPF|Dt%$W})5dMX(nql2 zOMy&yEWPokJ^l?odvVv&l(un4B`x0UHu6T8LraPoL*NltIUElZ5m!YVjcyZe{0Gtx zK{scl85IYuMO$EBG$tHHu0zc0wi&8rW3`d{VJC$oYNJ?m2MBStoGQ!4xQLHS_tBeI z4=tL^Lv>Bj^g79fzfCc?aTHu%Uvn6&+a@&*N~Rba)gbaLl?WBo%1^Pjx=t&|S^9nh zu(^m2A5XEp+ZN2L2#w^7IpLW%BW#F@6{50p0liwKYe!&NWu2F@oIV-5r<}*;+3|bP ze>zfTOAXqW760vNex|NG!Xz~@Wcd5UhOk&n5clNgylEGuS)lF7K$c{a+Hl#rx-2Ic zD(HhN(=Sa(v|zonLt6q9;>ZBVh6n__yB8Pn7WCY*KX8V+u(@n9e zOTe7&?}Fvh8wHRCgku@eEVodSv4NBH%wJEO4wEp#-}%%$wR$2D5JR|@$vRkRb7}iIhxv; zshP$6ckt<2KCd5K9#gwy%I*Ey>Fe20M_29Y=)g1AcBH#@^pXEtP30j`IbaZgR2{t^ z`r?E$A9Zdf@wct0$aRwJ=i9-^yxU77e+%zOG9j-MXBP)nekEiIFHfS>Ba|3w;D?|dL35fhFX>Fi zQcepJaiZvXu&=IsDUMoZIo?5N1`h|7?WDfbJmXcY~w_lg&|t|BlK!`YFCDcu*n(Sa{%c 
z4$vg-+drB`)#x8&q6x0pG5p+BKvfIu#O32<*&LF;z8q?zL`41|Yicx^Yq4jz6>WcO z4=~f8fF;F-A=fL28*f$mLyZ)0X>6z$biG4VuDpiV4z zY~_evrt9XZfAzEyT`LtOtA^qKGM{Tq8NMHGIOL>T;4vaiE@lH-C<@aOeh_^m?<&&h zdXSPA^^n-i>Uj{Z%Lb+6v5B_zD^V_GWE1OBNlHndI9YW5kD^Kk@cZ&Ia z6oRdBan^1xma-m6+`d|wRJR`V~A;L2zw&Yu_yoTtgzTrhi-xxFYK659imn;^%TR%3!4mYTU`we=`K-=!r$)M^U|fng0gd4 zY&D|@id)hQ6lZ6$q#}%snpqqb>@aUApp7;*W>0UoVkg(l}MYC6COXI29 zGc~J-gZ4vC{yy!bjlkXM?rF2de*R#dL=(PI9-L-quUxck&u`DmTQjI#p*2mPjNqc? z$X9XK{UtI;@pJUK?cwIxV;%;lTG0!%y5 zJpWhb11vK@d2I=!;)F5vM`ML)^6b)LCj<7zlFm7!F$_T_`hyDZ>MEBe@A%a+9RG#y z_*KevIxJ(rEBNzd_KBWC<+$;IWH5}W4eTN}TM#4*`n;PelIth54aC}8|KHL1Kd9hY zdg6C1@KJ_+m6OHmY-}EB_QYaDnd8)^Y#fTGC1QB3E&Rq&s{PIUL5DzjJG<4E+;x=! zz3?hDSALlK#YF2II?cmMlq^D)riLWp(`LjFJNTY&BkIxb04C*yZ)Vjb*8{OJ&U(p# z3cxi}BFmgL+V%Ew9*g|D_V>-jj>E&_kXF}@LX&k)UuVIb+!>`~SGXZrZd9yBFoeR5 zNrxA*){}5*BIRJ3GSAb5CW!RX5}9`W*v3|J4v;znteT1Jn6BmRxF0|>v+o2A%ix3E z_}aH+5hk}2B`>5kW}hg%W`rkIVN-e8*j3!A(mQ&IFKdo(2cn%(!rGGG-la2y4dz)d z;cU;$Z5l<(tUS+pPC9~e+Sl_5OnGT=${=;{P%TayUQ^o1bm#Qel@0Ea2wDFsgpR8p z%{42-o*aWIGVFESm@;QGB)am8yb0`j>EazkuEVoKMd!r}nWzO!rg#7+BuCQ?4|TZ^ z`|;e56wJl>(SLl!DEUo1dvlUaqZZ{;%CQg!oaJ?FFxAmVK6uv$_;SHB!^)t!xv-f_$Bs$C)MjJg|HA#qe9b`BSwl8 z2McXH6Uvn|ClJyKV8|OT-V{LIG1v~h>gQprzhfK(DrmFQ4M!VgO!ZS8o6D1p%RSmV z+Xf5C09vC7w0t%eXb8L=U(~wlP)tZ3TaN#j4{NWJFL7# zMeiEPfaIS?IHAdP9aH+sm5udxfk^i!o76N(KewVyMk&0@OpX6rwAKG}3?0IvE?(cPM;r3Az!_xLiYFY&)}Sl<19#fU0x zj-uZ}`Ey9BnVxqbj#D{R24|$jM(dNl2KH#FvbDSz*@x<{sy48Gz=(yRiYW`ofYMu+ zzdPsn^PhpxWX2v}!sahrD*o$$3k;XDHq|HQU^rDKHq%xw$IafF=^BmtY8T@#Z%YDW zAdx@ahu2vaLq%D&-me?D(}&)mEb|5m{{oc6#p!vRnXxnizHWv)adXiBb>q0*jdBJ~Zv<2B}4vZ{P z>E)ayXwPyT&!MqX{ao=#mpGCX5|61&)PEQKmppcZigqM*Xe+;DOlb?AQ8hZ8S0~w3)(nNAK)Iuc7rg zfIT}yB^fVpt`B3Pkl;fBY6u~2&%W5O{d;oadPW=tcE^D^C>VI_JPYukh@TfhQoWZeCJ5B$7I19W@q_TM0($TkNK3wl)QIl3|@|1RCuW$X^KSG)YgdJf$ zD&q2EfNK5$`W1XPc!pW_jn16RK(}y~T4kUY!;u`93tAJiu%lz7ol{&ur{Q zrA4yCFcU|gV0|>p_`D&ByZc`)DL+`Qqx8bmSv%J+qdQd*Y<;Klb{>?OW@XKPzqewj ztIkvI-K;Hlf@9cCVRdISFG4&ME?xbBnin*J=9sxZ+*CAN{PGnwwyeqzbU^u}JEz&U 
zujyQvjy%LMauULwp0$59k|Lxd4Icntq<^uQ3!iJ0*EJT#GqBhF5^zk{hkBT< zKNwtg4Y`s4lJ-1VzUy%1!)~>kypou8iu}HY$;B}2qhX>w`(0ya>5ndBmNHvwz@<@d z)_T3Arr!pCuZ?)(&jZ=LnXHsU&B)ifpJd12LpQF3x4*zCIMUlbov*YMkDIX`ZQ}#B zDEm7;2>6H|!x9eQMZTTQ#83yK07tV{aiGreb{XKo=?{!()DRH+$I-(B{q;fyyO2n) z-rGbBGoMjZLapRim!$3W&f}tbELYcO^N@9^$@oA{Fw|v>Jo^sP%|m`>OsVrmyd1`r z*_-ScUuU|lzR~%OHT$uyWNQuw)pj`yF@eLl^+;zNjqf~|6huSAAIGYnALff2fZP5> zz7ARH{>mIa^RkT@w4ZV!CXF(cDn9w9CcPN-d;=6xcKKM>?vd2tUshA!XM9hA9JplyPAlKHA3W}2f4;=EdS9$VRk zJd#7BDuS+qpm{NTo#0B*Oj{$Z2l2)5j>joob07T0UCp(y#jl_ioRJq7;CrcFZ;7+D ziT+n)gme?&`MZ8Q3URYd1 zUXO6*c;TeIhsi*l(c2?lau-s#yIh8Vm$bBPLkB24pwd6-v8=f_57U7s_X=;?ZMPX$=V+KD?D%h69Plxj z6s25MR;B`_3y$P%?|Wl%v9)a+)Xt1ovYG0-8ZEx;{wk%oGLr8D(F1mGIiIYKO7qIT zkyAXybQE{@&#($=@kZpE5&n7R;k?&LuC|WbUG$$?mLATHDk-iOwVbXY!1z4~OSn zL9Iql5xuH}kpF|{#T-2i$=3HA7g2YTKZSXE!U$;^53~)*>eS`jehs0aZ z?~}w>o$4HP*axMt=ZuDj#B+$8z;s<~`^+`;?9euOJhNPximpeOXZLVk`?)op?#1LI zsEJ(3NA-`GoL{a>z!{Z>a*D$!ZnSUCRhF+h1{YrQx-{HFin8WzZefO{l z8cNaM;e7wxPv4B1qdM6*FoUE$-f@ij7)Qn+%qi1X#m$C)|q*>heV z_F1E1;>jFo_X_SxU4z7K=dzD=a^~oL!C9SEV-!KD$#mnz60qM-#pJFWBjB{A91?@LxNGc9%0{4?@cU#Y7z;WB&(t+Ux8ij z{ywC~@RW4y=k@~>Rr8pTmb$u=7qLo2Vpes~6>g_ENtTY7^pVeIg!wVc`DUmbY|`3M z-R+tCPAunS>R|zng`6f_20?)pLm}bSq%ja@pW1*wXr=T!IW0oYP6_8+GG^?eKvEc| z0FC0qr5|LsL5JWpacSeAuHLx1qO#F6G*`!D4x6a;L#0WM=HD&Vnsp=Ye)1&&^=NgK z$R=p#49`^kf{*a{V%70)-|osKU4qK8u*Ee`n^}AVgiVqOGq`)`$~)h-UbZ_TpWn5) z4AU%KuIEO^Hr5rLcT?KcOFj<^6-E5p*F`RXe_*jNQ-<*{pcs{>ypy$kvv5&h_=hdL<+0wfo7i8Zr zN2QPM2zwaYFfOrCFU7(G*GymiiuOMUH#o1w-P5{_<`RmBx9=5gvCW1?z*U9M+@ATPF1Psy-Tq}n0&H9|(XuzmZW30{I#a|z_}fb*J@}$Os9qoBgJ+y# zL#8>}`N|}X{(N$J8f*=>O{m7)%z$pbzMS2$yb0xce}L`230Nn-UPkBNZy?Asat0>M==4pw7^P*~|GtzfgB9oEz zSk=B0wEed=|Ip)4I}(ZDBYlprm6N!l&1a{)JCR@4>nZ9els~Gu+`<5ezJ3A;{B3`Ck6-7#p ziFkA{?4$2BcHuw~sGfB+sGG>sgP(eW)M^H@39}u3uf^6HSPdw&q^1jxpusc>E1p9-Su?Z)!3+F+@GwHP~|a`e`o(nklU0c z$M)W3BB{3Wn$(JgntlTNAP(iL>=b;wqp`!xMfLpa7@%+oG3L2vFv0Yd{WYP^a(Nq8 z;2jw%*$3xNJbL7%aTo}j30ZXHpm9k0sVi_dl8xNyUxDA006-~CjL%1|Og^BvD;u`5 
z8eUsPX>1Jry+fY`?0PYEo<6g2_UycjSnM=1^3)pT)`AiKgWBpcxjSg3%AirFd5eP* zjvhK=PEj=}3VEoUv38N5?p1FxcdB>$Mz7(sJzqFUM>lEr#N`oGvZQdU_A z`K|dEXc~4j2p{1d#j?jW&BI$yC00u2CH5F#XOFeDJdb_wrIAZDw(D<$uoFNSLNQjK zmiC)`+pCCs75<1NJK7S?oxlh4Tt%Ivo^LVH@gw3D4)|DOKg<>hv+aNnO=o?qd) zBGw!;7ZuIzay6nnEQm`!NKyMPw{nUUXT~md>GPvp*Ji(};@O*%38?IVxSFTwda8h& z9P2K-lj+LZ<%5qMIw`qxMMTPc z%1Ih+=0rkm9R@ptoN^AtL$sNVqokbv6{Nq1?bg%!*-vI88&j7m`-g2-c|Su|XmJBx z42Uub_~d!tp@Fbl(y`29x`NFGQrL6X@8ZCx;)-D4k4cR9IoeQM*@nMU9Mcy3(NVPh zf_5O8k#(#Tw=kX}S;sXT-GpXIvnQowOrmasb{$NgKNzM^`;cBQ=W!Z=VMcOmH1-K5 z^bm4kEA0rOiCv@0Apn-2k&-3;*9MhJ?#( z5?H^2k%5!&3qybCk7+d3658c9fRy__w>T(QRzEr z6APC_Hl-})SqZ!%4*dsbIVE1#BJPv13iV6|Xed34s`O*jDYmyxsWFar_w}g$gsP-F@R z<>#H5`3B+f=oWr9JZTL7Z{APZfW5v-+aMO7e%ivNM-W#S?|Fvcyr?2@iI$Su+QJ(8 zq)JjtA!jdwfSsSQtWg8*n1W0cSx?;@IDH_LVuf6GBSq35qz-=rbdpafaqtpmaJkD6 z)FU4N`0$>ky=urSXvZ>Z5+CCcp%Qe6L{{t03OeZ+ zRCbk>BIWW0M0}3H@E=v2SKJ_R*ZIq!pRh-^0N+(eDiOZF+6xCZvte(X-r1bgx@pkv zyuQ{9&YI}0FuXVNd!Ap~T&FwUkgPRr@D4#DMnvJm1tLU6;X~EEviiyPcadF~p;X(( zPfbc8;^*!TCu>?d3D>G!=ToM}c5s~~nAt0=*7w(iu|XXp80WJwG}1joDxbSx$aAHK z_4SS%_W_33*4oH7igJ$!EPp1HV0E_tW<^(9NXO>(=o@os$07H+%tEmGFeU>MmLY06 zM#|ETy5I{ZDk;tjza2(WL4xUo)ATh)MsAvybn+I26<_Ht)DH2oGS;c^iFp z4=e6_4}OiZpR&2uo*f!1=h32V;?$GJj0|3JHsw|;xTovqX6j}6C`D5HN!C5e+*J7P zKF^L%n<_W(?l+=cLx(%qs`;Bp2y!0pTKzjaegZo4s`ypoU3=-CzI7%Qc0MjP+hvIs zvb;zY9!)RL06PHqC)}A{LHB%6N+xzQphj`@&{1BeOL{q2x78AOd_f7I+j_IvX+|Vn z;q+Ntq*~#0;rD1E65XF4;rnv1(&|XIxp1t$ep72{*Id~ItSweukLcT7ZA-LpPVd|} zI|J&@lEL%J**H(TRG(7%nGS6)l#a|*#lfUcUj($QIM!Fu1yHlZf|t(B?*%dvjr||y zmQG$R(Djjf#x&R_;KPYt+psuo(YjfvRY^YCepUr0KHi`K5E}HpQ}UVqa+|mpE`Q|< zdhU+Q^%%w9`tGj9BKCBPd)P{E&^~Nr7WBf7rUWVMq8{5g_b0ORy#>P_8@k~pp8sm` zAK8t57^DN6D~ln!mx3!7?RnjSQCppf;A@p`!|uysB)zWt0wEJ~NP^3@9h=eFIzj}u zLin3oX0!Gg7N*gAUQ-kEVRUF2Fm*1dw5V-Uda}wp?rS*;JB*a%d<;*zOP(|x(?XuX zT@q#!3@qgxWi@Lnx@t<=W4YNd1RE{H-DO3K!}#f@QS$BNWln5GJmy1GJa}{u+9e|K zO1UT>v>KSj}% z1ang#sQMe>iK-&XnHp09x5iB-ZOc{map*+J5@myMGiwFnRd*g&rOsi|J!C!Hu((A; 
zk{)gS&m|={yS~CZCVsNh)&>Us*frV$UMqb^bB81yA;$E^JwPt9k4NS5IK(?4EDb^A?E^z_xMj%`kfHxeCO9B#{Q6c ztL=4VCp>ts_-;MHzD@d;1d8)z^Lxwb+b;Za^}>>?(vDJ)dJ=Iw`O6{ zuC-%5D~vgwyL>QxiSK1c-}xkG{zTaJqlTx)N2nHZ+MvhzFKM(L`;XO2D1AhuiWvQ`?uM(s(Phi{U1pa_;IqwzwsmyrO{H3KvRCl7LMSLGWoUjP z$oo{WpJ<}lz@>{WL$!+Q<{hhlP|KdeGe`AZPv;w?o=@B?_3SHT1GjI4PEScrQyH8r zPDPoV{+#wyfE@$V?tuKORJ!R*uK4H84tF{_%-is=TMLf8!&|N1cAt|vc$_3U9X+bX z21!M&@Pr@ry9YoEg2S&IWRFo~(+%E2_Xr~IJZC(CXIR#Lx_2+XtScM&FJ>bgXf0FA zPfTyb_3(SA*w5%HLA_6fMi3xkGmXe{AahG1?v7F4Ylte+sgNx8yGLE6p?5b;zPAG&fcXYZRYmHY~O|d)^ay%!^0=f^?4r>4fNSZd(zC^9ro6d;5Lq& zqu+6;__+p}fb*>b26D^6eI>l%CJ;+T`zM>Jr#}sMG7K%OC?p?w)hi5GGJ05ziOq|! z=x=f4L>vZjEx~HXe#at~R17>w2uJ$!_`)8{^Tc-jR#Hi?jt-prwCrGgGn#3hl24dm zldosg>kw^8#goKcCK=*+s7-U4()3lMoxjW=HnQ_wb_FGqw*!nN`=Q7pBfaSk?msx9 z4w(l2)N4*{gEFy=qg~fFvk7l)fU6LpQTCK@WSvf&0LmzTGANW1@7+QJ3`M+dc2Y8y zt^o_&Lq1iu@x#K_YX3BI(R#bD!1=5b(kTB~ViL`hpz<*}?a~GD5=9I1B{L1C4+Y!A zA*Ore{`=ZUFVl<2uCxSy(0t{=6&oGBQqKe^J}Y>^UK%$EpwlXMh~1Xy6&;h}VGTdcm4+@ESi z$Xo1_84wSsl~^tnvi^v)!MfQFLhjh3Ay~l%t5k;|Spz?SolNM9aJ`XJ+rE?UGs%Ydbo$nb(!mkD|0>$yf2HhWp#)nthTOk*s)IOEU_qIB_MT}8Gv7w z)1iert?Vlq6I<_FNO628gDnvW)ha~1@FnX@JdNItDGO=wkA{|iNP-4H!meaW;A3nZ z*tb~SNjVUMvsZWpGORQw2MXO#j{Y%0y?P5g{}7J&J*BzZp3L|uwdx2Ppq%3F1EY>m zSL{U_Z_W>0&M^inR~kA<-my?xX;qSE7eM-kG>l%7BZ5mn^}%`$CBimAz{c$w(a%;?K4-_vd|h6H=}23A>@E z$ziyCWpieAcE+IVDsiV5^Dr}g5^v|%)Zh~w;uiM{jvo@DzuB7vpcATzIOvzJMkSIt zf26$!EdeSgg|6AiJ*vvTq+1hol{BA7%CN4P83r2@Gmb4!U~TS%DJqALJ@oDxrw{KV zzl@mD$SYoAB;sNOy?`=l4vMHD0iO4wDUDY4$EN2L3ng@)bsU^EZv5b$e3}Ewmj0W$ zGwaO3)M%7dm31}_8(ODTfo&ke!rs{EF#%p+z)O;GFw6Md@=BFP<78(Gb92!|#_5rx zIUId2V7&}LdjT8rMnpf(pkPWuO)k0vo5X+!E55DR^6&6q%s$++q;!;_q-vC3F_M4b z=gR_=C%tuW@`w`aK_{OFYZ`E$WhRj}ezCN(+F`Cp%uP7I-D0kY+|3B={b0ULsgi_5 z^_7K3#>9=Tpy%USwd7)uDGU`1jt;-9T9Z{7(GHK-BjMzSDdaEJrJ|(e19O7=axuiqvckscp64zgVR@{C^ck&^ER#d^@CMPOP)^kX( zvBciKadokDb*w>}3Yf$hgPs?wM^iGo{D8!nZOmF2Geaz!Z#H=kbC?2R(AY92O@8hC zZ9aXT7k0mUsL4-RG!BAO_;t3iI`KBfbxhjQ7 zE;Ou=mhw^wP%bG5sCx1Od@mvWIIS9S82b`Uff+*eb1*tC3mbqwfsNDC!?`lWaoCHb 
zEK)M5$ysY9F~81=s$x)3YKNzS$}(n_LQY@mSHh2G@bP?taR4NfT+$7Ykzuh+ogQl4 z^q$$^2ZB&A;qB(Ki2`9a2%e%j&<3O{K<;2o>N&ClpX;R=mq;M2xa%OMq^EhT`Er{N zWso(m2D#g%AIvd5;EJt}y#Ue{Y1YEqk*mK`GzGvuApSw#%V1SO?o>+OpM3~a*G|(k zT1ek`jRH@W8PboCmKYhoNq&VNN*NI8s81-U1K1&KfAe2MYhbbY~k zNxeYxvAEWJ#@xYUxwn)%p2xJdw~Zd3)l^xq?ERE+_hq@5VtqNoo+hA`2E4xl4VA9j z<58n##BL}in6!*gpoQ+4W|_icS=XlN=T6gG`&D;0PE!9}oizRS9!o&0e?Q#uw54#z zi4Tl3c}EV2UkyJ11Ruk}HT5Q6lJO$AV58k?a322~4l@s*CRw9nS z>j%EC#ja3R5pUnuw#p0;V4zy%nR6WJo~H)`uAx;!0w7z5CeY{A2(anBn-I6syH*Qe z+%%=3LRx8zE+io$W`pUMC?~j4&VzK>*an#;@^^E>zeK3=XCK6;u9pp6rY22maPvLl z`z&ftU*4?Xpf%&s?A@LcY|-La|I2`^6(e%NX@~FT%g*;q+2P%?JK1yNOM=_W`azLU zv?5hzA00oO6k_rApf~mM&@J+%w_k<3yoLuQS9sH%GISt?oobE9yfUd;ke<2SPrHRU z)9$v_dU#qc?D&aG@9n(%3;oI@{x+*p0=M!i5?XU)S@t4yv&~}?oBj=#>FAI9K2yY- z)%@LA4Nx#dT-f~umG28ayK;YCt0Y1$5%6`7-2#SB3K=uJFp|GV1QAZRyEU>`Qmsm2 z&fx!s*q7P2Ek_1M)KZOXi|5bnf>I@&BAmD55@EIx$eQKCTM?btfx&8BHK1Y2tgkfg zyS>9(&d_G=g5Lh`^Y{U8iJ%Z8iCsK^^ZU<2R8>x1^Cr`Ow%}{^W(Z(Lj7!85c32TY zSX})fwa<3`c=nJ@deoQEe}^t}7q#v%Qp&EhbNX8QF73Kbicrl!e)MJSuLn*#9YzFu z8IBvPn#-rv%m_c2r5L1&?V**H_OCY3){>UhI{?5o6Luq^eaNy`VzVH=tgX*SB;p;u zXpnS9vfL>FBveRvCG8K(t|m@e#y7$8AMb7TcWJ2zpJ;ff+@j-f!M?Md{C%|N?EL=j zq7)69qnr9+(`pngdgxFb|JX~<$JFaqlwAK|H)JX!&f<+A_1usw1UbJSBjBiwDFS1_ zUkZhZB01EPAeBj6Q&t2-d1GpIg z@vmFNf-Rlrte~+O!ehclveAU*))^3)xrKm2m@J&(F;67BpYFIdOKWuVGqY{Y;MLAm zYKcgz?DQ2szyOTX8-XDED*~~Y{5Pqje)Et)n2h(MK=^TB?SfVW>iBMA8Gs|eflsc% zy5s4YhYtd8h6iG6H}m(qj67mc+Vu^I*V;qr{mlJKjJgS*2v)1uM35IpQL%v|{(kH< zrs}>E6Uz)#b}aH2qXRbloOwx15YCG^)Xa3Igeb4KE4j(JH#%3Mn*yF(Bh~$1wEiQ_ zWpkxeyVL?*Q=yBJ$P5>EPaglkjsEBeI0F12nCY>t(OUy4uOkDL4@POv{b!wJw7laU z4}L1ASUHdyqOUnWBZ?_3n;&Cgh%BWL^SK4*$SmGDhw(DQWT8WQJzlR2{i%4r?bz7# znv`Puo^{6X3QCWnH-1xDO^e6`LW3*!x(#}UQYb^$mg z`TrJUaUt75yl^1#r-{J4e^3cAl=I_Dr=>xwm7Lg7C%(`TwY*BG#QR26>le0+ zSjA8Kpk{_9Y|)SEY2B|2Lv-Cl3gV+L#6O}c!&g65jJ@HknlYmzUS$?;sa(dF{aIy7 z=>r`$X{U0m5?@2P!cXZRoH>HH8_3W`dWy13 zce1IF^&L7{DkW(g+eI$1shczxU?#d?dON16jK6flt~Chm`~GAYEV57P{@Oe;9+#Oq 
zkxXR@C13kLs=fg@v!H1=+1R!=wr$(CZQFJ>w!N`!jUP6r#mw2MMX{-)F_Sgh&vcW zKE{vkxb2N=1XV@_rK%6?*bjC>#k`8`QL88_Dn?4u*vZML5knoj56%U-t0O0_fTM<# z@yL|l)s7tseqKE@4)zPbaLr5&?X}E4Ot8k>PY-VRIH%*kl_$W7(DFrMJqW(|$e|aj z<}Z}X&QMT1GGoQQxSiMf=_!b*(=4>4l#EcTp$czycI(KP4|gOnGO6L0eDozy$`iq7 z+jF{tG>&vUUYR{Kr%9Lla1L*V;2bn1ARfY9ekHvww86i!>4)o}QIaNG6vxwoJBfN& zTG^klmW8FkoO~!yLKNX`W0QJT@pnWPD={ zkDz;wyAkm}F^IwL#dxW_h}LWVc2CV}$_(NXmvU=bO)ZX+l$cV81cR}n0(X4LGVJf3 z?*69|d6rTpKAe^X@(o*wwl|!et)4$unl%-wC0oil(%97D^_P6jz`wT8$Y8Eex`Ri$ zLXK0kqAI<$(RB^aT&In;aa{9*fb^QA#6{ZM3kUoC4I9VH@~zddNKFi2!)|z0EboNE z{ia6Q1z_Y(3Y3Ly7U?{jIitwcPB?I2KkD#~_R13bhc1oA>E=UoNp-Rm^(^Z$3)D+M zBP+9fE^}*E+e~z!_m$WpyYO%_fki#~;DgZnT)#X|4zIP3;zCXlDq<`sXKAaI$LZQ} zyyr@+j|I!~63a@fS&NEj95t-RdUCfMVvVfzMYuT2H}=XOX8I`FmUKz^F>cjo!0k5Q zF?s$VdCpZVq9&~-PfUFk=~ekfUT!72%3sepTk&V6s?>ZsA#WXBWxBkf%zOn9l{e+T zyM|jKz1s1FBgTbu558xvCcama)nrIOB8fOXl%v)5WK^JSqX?#fTc~k5;-d zh(_Pd@tFK?0~+T@Iz9|(X3b6@M??0LlC407cVDzsbbl6>4~eXM1-5VW>Ztk*qTzZ<=h~(g;x?UD>*TPzg327N_qACmOb5l z^@;AHAh=}YglwU6tAbT6ApgiV*B~yXi)m!wUxg2!t8E~ zmiQ;$RIsLL$|H!HI~>8zo}XYOF3N>af&yprcg!_FIHf<+vv$RD{(%0TM>ZN<9x@MX z2+xwNd+uQ|Y`tn8I*GHUX+xEXotm(v{vvG1!!eN7`0KCReg1}Gii3Coe_4@=a;|NC znt+p)%$|a-rLke|+O;%oij#`fw}RyKW|eu;J9Ht{%7%L9JTpnrS2LjFSNIGp#)`I0 zXh`y^GS%fTg$q!#{) zC3`wacCX0}bd!Jo(AKHbye4qa+h8gyvE}Kr|1G1cA8Jg2Nk+DBUvzl|ZyVEFx*kru zTI-lfYI+HKIaSrrZ6v0hvuMLKrJGX$8nje|F&>?Dary8wZ+8jGzV&@ zE-~nInmW6Ep9@1VT3YQjx0*UO=Ps1~wI5IAFxM6<(mK4WENak8@3mY5GSKD66sm2*H*yma)O0?)7Br`1`KeHi86a#yotkjM!s%JhTraYdP+lfcCj4mpTL=a>KSHmtd)aGkvevTSKC{ud zobS+D7KMna$Q}BYHAA6dU@!Rr7)jPv=4DQ`XJXcb#cPuWh78?MNtQ73`71@!K(xT&k9 zMuP)~u=%IFwfGP$jrR`N|4C|9B;RpmzZ1AJYJfm=ly&Tp;D9d` zy*NdJYGnPL4-YR)-|D`r4~Hs5yT^a#x69-*Ix^236v77`Zro|dn&`rsO>J*}k1mP# z;tG1o*fw^5fy}5-p{{6wZE^jWBv*Kbr~+`8Ah>6*${yA%l`d9v`15!BIw9BVfYaC9 z<~*1=*RymuE#tINYfUvTv2dlN_=Eup{6)VHL4SfV(M7W7&`sLY^C6ReR9Rv7=@7%i zgP(+ZRY1XeZqZhR+7uz|f=*)v?ZxTy&A-mIS}jp#8r>)z4ulp9oV;^==msMFeh9?u zUe`TC8bqEaKErcGH^cO11Nr{wFX`Wvq{3OaWr(X$!p-So4Aa9tO`<#mS}lg5go-}G 
z7qL_={ySe4y)Q@36h~%XPegs65PFSnrTVATTK8e5b4)yPlCx|=sfx<-P|9pNg3T7% zSK{mNqa%XXT~v+Xv2puxdwC?4`ln9%?ClYeXt~8m2~?qnLW3Pub;*sxU4>FJy48F-(=`E7>< zN~(g}>iSE|%k#1=;(wNx?MCj1CAHyk1B4v@j9CX0i%-9WKLkGfY5bk$gd)Ixi+r4d zb3YO1Sz_u0w`4&;oM++e9mWLCTiLZk`)Ol|#i{KF9(DA-NlJS6UX|Ut`=-Oi8NDV^ zkA3{f*A2gx)11?2#&w*QjYe^mxmT`#oF#FSD3jRV9oK-?R(R@_AoU@#6;UgLd2+2D z-KBSQ9etULXa8!;*1M!7`Q77ieY5#*?P|Mzu=^9$9@F3feϣ%UY8`RWp~V-U_7 zDSM&-@cv_g11tXxtR8hhSsvhbm}^TIbEA^ zez~Ise9A5xP83c_%z83NHI&u7X>Mt9`pnf9TVC8vDso9r$$%-f#fu6f@a*df)uo-Q_5os=ED| zcEe;FMSWSJ&ct}ag!R8s`bGUZ`f~{uR>BX_16UIZu3|HQ{An_9v zHp7)lLClDc62YY@VO}JkS_2kF)MYGEO;oHS%W;YuDSf29meyQ*kC&Q@D5Y()UirbQ zeT^&uH7^72nS2!YD|zY#+SZO~YV!l{p=s^XHa8fe1Wr{Ir~lt? z&T9&mFQ)1Obn6G9RBhN4O5^az)h8(>R7Z`?G=z2B6om`t%6fF1Lre{m0c~K~0 zXZ`%Asz;D)&nPl8w^z!q(xW3qYNIS&^j=w1)?4pd)hsHQJu%L&>=IUNSr-?V@a<#y zTe$XUE|?}yQS@G4Hzyq}NAYok$^v;@M3G?#N~=Lk0A7LKEyo$`IGn`T`3c+&xhE&g zGUdOb(GqsDl}c<$s___$V9iP|P`$KE66Ka)!2y>Q0W!(Z1+^C&IwAD7-&RKDm zn@lTqPUJ4whnly4U#AuBOX0`y@9}=T_iKqGj)SrPBvyHgUX8{~cQ&n$YZMhEYGih$;=(NLFnCA; zJ<{P6EViq3GdR@A0F*j71H;Z7rbk7w@|D5)fHG%I7z!A3i&zoOG}HN^4@2Y@zZPW8k#z-2^|-~Kx5rTa2PJ#IoVGbx9( zms$_6iSdGT;U0f^Fi(^HUqEObfHCxveHQQmm5N68!ya{NsbpQ!J&T!=K7H*BqwI3( z<(8F_S1t|R9X3GYtkqCkY%MCbUS*P0tD$w9$x6L;NSmOB={inXdS_%wItd~9g6P?q zbe5ls)xwWyqa@6o*JRjjFm*JXA3Z_f7BV2Q zr|8x;r2WS3q$)JNtkgct{V{eZW>(nSUAP3`gSGb@Ta068{O(62Mo>By3C4Fb0xq|f zF($svLG@T|?ZAQUbnm64rqnxjz@vnk*h&!BzyCpfWGxn*q%`b!2z>QlqgEDaj{z0qttc?)(Dp;3e z(yy(@YjF6%)!PGZ32TFI_{e0?Tr)><@Nh}%lMmyo%EZs_SFe3u*|%^JhjHJ1XGXjI z``I;gHSp+U(PI(CA?ZoqXG6&?-|KFNIGgKWj|g#lmAvsh#qaePKkb)vfkVD7B!sBr ztwrDIu9PhVp@t9Ota(3qIW!E{Stq+;x1M+(GR!qB3mdmJ6EZTkf_M>gnYyV*G~{HY z916Bf_&5)i%wxFAr?Wy1r!~*FqLp^99NyPZ-4ZHUy`0AUEz%0+bKT6;SlXPy5^Tn9 zit~>w<74c@=Of=s&C`mfeNxu7BhA8zZ8aUPGKDEyrHnjrw?v_#{)nzNg>MHveY_6& zIahSkcjLb>)xyrl4^6X;NEoPI)mVS-Scfz&*j>UtsLUHUf3vOFe{VM$n}31R)1_Fa z4wRr_VWG*Hdy0v*FC?d$Ny$k{ruxs|=UgZ|Sy?quvZB$JfE;70t4l^6I!Tg}>eg_Y zhK81qii(yP9MQjwa+ZXOmOLc=wpjZZ^%-&YDc@d%&LQkEUp2PM-s@%<^j>Wd*zN{m 
z`uIvD`cpvhgNaqh?8!Rgu94tEplL>Qwr-K^bDvl+D{FmgJ(tCsl2)sp@ zO8+Z6RqvHilF0dRCY(_2%LY>mq<5f&S<@pZhp;K@gL)OlJ+wIoR9s4riQb7G*E(lM zT`eb%v_6o2fW3}!gLQdyB7{*2rErWtZ}2<$YTTn(CQ5@*lC)YA5dw-p!l1x?Fy_?9 z3leg;vQHW-#<5G;K_a7kIS|F5x2qAw4Sjry?}hr}BzXo5(-a}1Nc2lv-Ux=7dw_`8 zr#XGH9?Vo})J2ws+jH0iX=yh&74q$+tx?E~Dm3uC#iso#%yxrgdwQ4sCaS#1Ba6qP@BDTTlWER; z_Nr?)h}&+X`Ml*kd?vj9KHR?7)+4QIjnxNdB$-4<7JHBLV%V%f75QVvg=?DA@P6oP z6|+Cm*j}NeBB0y|MVZI3d#*aVv3lH!Q7ug;bw0VX0C1mpTVDuBU-JlZ&L*CrEx~@g zvWYf!%l@HoTQc76+$Rpybh9IpMMRVsTga6ck4{C19$W_b-Af|r-k^#2-F(MyP}23< zJMWV1g}YafX{Z_Rw!3?-w2Q@oq1XAOMa^scf-SjkdSwG>qy_`I@4l?3=ytXtN6RU2 zRZ?CjbKpA1i}Nb`pyH@hS5vF0`s&TH$8A47t|iq@+0wI3nn-*7ob=)T!M(+ruye(< zEom9SCd#4heQ9Q{%npGh?2m^nPetWYjy9zv4ia)CrBY?wNlG2o zo#y=B+)MHX17`SlMY?qZw;;hMoH1JbxC*NXfq=*3fcaLt)%B_ci+Z)ctA0~lZj7Ga z6vPCw82$QeeH~s2j~}m&FVF^B5Z#nSEA;WOmT~aU%`JChOSD#3x0<`7!@a5b^5klL zE{Z37&-828$DM=l8@bj!a;JCkT=(qSYNG~mYkT=r@32~Pp9^&Xo0jSK~pHT?6)f?A*>9E846baRamXh?Tkxg^BjK7qxaHX5Y=?%)&BTXb5Z*`A0_YR#@MG~i$G&mDiVqBUEQmb~ zT-b4iN)tcawMQpfkx7NKEy1{U4Vn; zOn`N`SltDeICuwP!4I|f=KE&G=pA?A`qlH(c;DggP=Hm>jkJD-jK*C)#5xi`pESX`hO z)^AT71c;{_!-jQ+x%G$xqtk23#8vBfe!c#pI5j)(Ml$E{L-uq#7#P3Dj=X_A4S*3H znBlL^`de1}*(c$r2C$6jPAg-6!zeYxwbp@XvS>GY%obNhzgT{!V7`!tha) z-OVAEZ3n1vj2wN3s5_q~K0zKsWlI+qA)%XFSW#i>btv)AF5|UYK=>9Y<6WAGKhDm9 z>~TM~Vs#Y8lnF4USHyMiR4{8lyM^>Z)dfszO%?SH*J5wT-p#cJ8(>q7#3GzJM3d!F z)-Za@re5UMqQu?&n9LL_mJ&?!G}p(vhkYsK$*YuiBRNhjbc7<@KedR3oRvOw-kVSZ zvNJxHu<3gx+=T^c628Kyo3L^%6*UVHBMCbNS2_Jlr-!(Ngw;HidJPwcpmr&Bl;U59 zAB?_`@FD&}7<>qFe0pDef`=aa3O_%Rh`BLksk z1{srtza=8k86*=_O@dPgt9HG}|0hh)8OxMT0bAv-7S4Fb0 zkDTdD6%FGH%Ue}4h>u*^j8xB_GrG5#lle?4ZT|>P~W#{+!GHsZ*!l_U6YuunTFV9Vtqf-CEsVDxn`5_ zegWYFLHw{L|BwU&fdGMe0K@i!pl&e$0rj!O=1jNPZnS(7m~FJ!;{0j+xwhQ_1~U3a z05a}_tpl|I+UO&6fZzNz(^vM}Pl59UBL=z@EIP=wKXq5@hQb5vVDO@jfd;{P@VE}| z0xY~=(gD8rGvaO%D4&jJXmxC?gP==rw>UIMnZNf={z4-^_zT*Ix}^-jB!2k zsR-f(%PW|#fZ&86H7muGRa1F6?9pIhm8d1o)(~P9%PpAKkYJU7&co?v^T_d|XN>#) z!3%Ovp#4Gk3#VVSKe7Ntf`SREr>Nwd-~$rz5UQg@HcIOd^R48sza~N%YRAc*PdML#BJHU% 
zJ4#DV4c^j`%%U_6meXa;{077Xkq-yUny?@_RH-3I0cN|8tC7J-Yl^_$Rx=_&M=_pvWW=AIentRL+haM^^M| z!TJ`luzS(QKo?tikn2H_8}V;H#ebuMG_;kI2~LHZbhVRt6=mpZSrx`hmuKFx z3p~}OY^Pl#R_&`Tvz(4^{RvRshVqw-X{)yH9 zEB6-L=j}?Bvia1BBkGmEU6oSnRJ0X5#9WAJ5!^$}`yjW`GO}i*_erGV6U72-gx>Mg zW9BMOQH5LzgXPRFBi|ThsvX!{k@({FMf7vMm_e4Kum+_J(dn)Lx?}A7A200KY_cH& zZ?wkfPkq{|_yzY9Mp{DUScVS29VmOGc7M+9)y?>8m5*ZX!DrXh%3k;_&I`f^Jz;aa zG6fxC5KR*@I8v{~$+WUL|Ow zdm)QEgfm<=jDTes8x>}^Dn@G@!Z^BWn9Ycf*$dbtGkju9OVo@ zN9JtXndsN)ukmMZ%1Mg5TXE=SLrr7d` zicE-1gCh69WSS7B=|11x~CP`}>r@j8`xaL>{FyB{^fQ6J{djI=f^&&_Ni6`plZ3X^D3zfCZpN`I&8SBNX_9q)=j-Lf8 zYj3Tk$k~Cdm-m&_^Hkc^D`A`*;amMNkFK47Q+u?<4Y#Q_%qirCD5S5q7wGWybg1UW z$zq7iLKXIoVfZFiSM=*s=+hIaizoRvD#CpOAc7%+GWDghfOQ{tkn;%--4Rdsk7xQ1 zgN;yU_w@wG?XGduS}l@sWdStsu_z{6;wpta-!bKJ1NAzhaD3S(Z8t)%dEs)kE+ZJX zn8YzdzDArt7?Kv}*9<8pI<*d*u?4C%O?XObZYL18(V7*eHk@GU(b-JnjL1;83=vDO zb;;T{Zg#laRQT$Wg#f8g5vXrExuj*tA6dXNu?im;@qC!!En^%oGk<^`Y5@}S?vGnV zm-(nUVZCeBf=!wptO)3Hfz9gv<&t@Q067A9>=;Xr601f*wx}hVjrJs18=Pv$yWBLbvBXw>nybvCzqLC zIvrQL3rJLYh8-HK9rX@x*;aZ$M_Xqe$PWEobiHM zan!Ew`Cb1ABg@_`z-Ti_x(?)N#Fhiceb94=| zCK|AfQTYM6Amb+3f%HP z^V4u0z!4aj5*Yk9nldObupdW=d4v&@(TVAIU?{B2Hx}l~SJ>@fP_{27JOjnY%M8y! 
zFSIc9J%$(=7`=%Z6NZr7BHnsLv&+2%b>kD-&{MgM;U5Wu%_=ludGG0P;EwJW zw(-;ih3{K>ko83AOA0DgEede`#!H=+2LCmb%YhpN|7{bPt;+fcyrUuMIsZgGWq{iXfqPthbyUu9!)+ zJU47kLMuMCbn6s|E6}bu>(tIG0N>CJ@Q1Pr-g*MPj?{*DqyMSS{34WyvLz~O|1T(2 zL!vZgEsOg4iI8i%i@K`0YFUfAzVi_26`4t4@Yc>Z|G;(e@^zj z$RazYfEor}cw|BSH0p1sR9{H z5rKppn$OY{68FPYH>jflNo`1d5gH7I{M`SGey=+||IUHXQR9o|yI5~A4_rC(H ziNr(c;DY1}bfi`lQWhNvTivA%hIb~>UV>O*vs~WqJra`4%34)gQ6uu5Nrd}@kHYv9 zYLbh=uF#=k5vVROQ>1en6Dca%))vuV#c!4zxpn!=w5MsUA#AfLGdLllZ>os0SP!nK zGUf>;|Jv{1!@HI8m)2JoqbVhd({sx;Gc2P>wrloU#1#(d{Nas#BgdxI^s9)uBt)ia zj2)`u`D3HwLNo5h=+lDJ($hi5Jsnrb*)+;tiWerf?GSdd)}TI|C^nUe1fMU zzfJl#(}0yS{m1j&l~1x4VgC#H{ygyC0zhBjy>E89|ET$zUp;$Yo_wD9rnt914vO=h z8n1c%Fg^%@8mg8@?$*t??Ha4AQyTA5H{7(vs4cN*@=O~5Pf3@p1hkz~1CXK?M93+i zBqXGkV^Z)=$^k*BWke}|h2YK>LY`dmskcsyQ)qfsTllME$jy-N(`S^_8bYftjv&7F z8Ads#u;?7ay*K~W7YjgFIz&}bM46)5{8eq*q3tkjjBQz9Tcgu9bLK6WQr5IK^k4On zw~f9~hp|WEiNtH`~g%s2WN=~vDAXev}Q)o5k(7`1|7#$y#ymJcr$Sy=QryTHvc8)XBDW+kk z7<8p_$g1GU=lWAVB5ZXR!o^d@Hd8*Vj7zic{OJUL zu*i!8;e3v#P+SpiNyT4P&D~X5{!z)^RZ;y>(YILzB1IicRfSYl*>y?Dc1clpNtwD? 
zO}kl#_f7G8LH@1RZ&~28Q1DGP z_%SQ&3;}K-54)z9MF>J-+OC5F84oRYI!c0vZBCl;q&j^Wkf}{e+uYhFxOy23Vecw%=fq6_;Z3X&;HZgK zY1LfSvQ(F;Hgl%UT50E6Rl`~r2CLAOW?%M7?g1<_MXExofEv2@z5Tuk=I$PiN@D0s zTfCdy!%fImrCanX!RW^jE3Df(1~OM1xT6oZVBbYRj>#wnO{ zo|+`GnVs#`F*RnXWG6Z8b!I=lCcmBJoZChJkMC7wns_p2^7XI{r#*n@IYX~B!#ogR zOlT6gAq5M*#~BrBdd$~P&FmZsKbSZ$9_t8WL_@A>Qcm7P$w6x)?9-(MdAPLd(0*S zkhr0RX15y8;h<;k5lrB8dc^NR2846F>eFVcY9@g1?Jm-l7o+-I%+nqdHoCs0&}=s> z?DXGMD8-uGUnTkbO@FbvT41f|(#}Dn%xFV@>_!_`*p-PNbJ^_Xbw3qD_K;Re=fS)R z_e4U~4iu!8cSHqGU%!EHfL|Ah)B%6n&xq7MGiakN!FG0??PMfDzD^s^sOFsEtIMRE zV4H;eA_%N{(s|;J;^}xkIn1gRm0tQ`$=y&bOnhe^l(^;DZ7OeOtq@yoX#4$;G^O)LQ=g=q(@lq)b>A*=H@mxy1J=1&$=^A?lTO_)l#39YQ>8=k^ zm~&c`E@4bOQGyNNKrF$Sh~dLLVPP!6y3BDP`#UzA>@I>0Kg*Lx_+7KT=$om;f_*0EcZg?l*n zX>l~XdwUjs2d6Y6=?ALU)`6ast-`jVSY9kFg9XYb+lEo4ZL)Gd#>Qpc0$t~2!Mxsk z`973z41*Q_AUwwj;u1XfJ_T!B`yZ`m@4jH3vN$gU&sE|W&*UA@enDVCMIfO5ttcQw z&|P3YpnxpMnl}zXU;{F-NNCjwaP91JN3!W8P{|Fqi^PV}lvZB|k>XffE+?6=4wOt# zY`Gjx_q{|KPW76tHd6V(PHws@UWJFTyx$&u6~BKZ*yj9=WAYzBXuaq1j1{F~C0{Yg zj8?1Ja-~2y&5qaW@s!yPPg6dU^&Md0iW0NX@4opoq*35$~QV9DpFcPN^){+Vw{?Sin6l2 z;`R3Y`llrVF`z%-BU{$GM$u10*rtbz-d6PzU(k^$lxu`asFti2E0k*mi^!(5nxy{k z_m&Ga!ew+@UJqvr_I>$;gJLn*%yt9ClnZ8nOlJH3LefdKDy>Gl!BX0vo>_0a?kgZ3 zmCNRGz8WZ@Ub#IYOH7DzF(JZf9}_2xQgk|>?uPi2%j11}7M|z#dikgK%k%zfu(N6Jwh{(y%8})eFDrzrt0CJ69iK=NHI;V{+r*cDa#0yxXyC{;s zFG9~p?Vdi!(Ed|s<}7A&NPp|sTKDv6ulf{>4cEK3Nea!4X#6K&^4C>tYAW5>>j|6vzAEsWdBL!Irzul32428BP6n;xBh z-j5>ZCV&jv%pUen`nCs)oih!Iea(RjX-G;F~W5+~{MJX+Mq8nHs{#5OWyQbLN!9dgwk7DS!-P&l$( zq@ZmKP;a=}sQjW?tVMRtAe_q)pRVBZN#jX%IA5@$KkkyBUc^C85(;0Rzm7!q*n_PNR$*tPzlZz;(il~CDJR%oms*gR}8Ky_i&nk8k@OHEOulB zF$!Zc2i>M%cUvJmYW2NHG4xn7^qe!u?FJisln=BiFwjvkz{6mQ`bo#pLW(8AtY+i6 z>Xf^LNaije4=*VZ!HY(oVW$XD7tJHSZc_oLiD!TtuK$+72{{d}JNpg54Y3Sn@I@>| z7?==DXM+s>{rzCWMV)xs@}nmZDsUx#C&Eq88WLS(Lbev4rj~YIW^lbEAK_?L|H4=K z{-HZNu@wPE4dqrnZAchZ;H&C_6wY)&+3v!7#}76D{dNyi^cqbnBIUD8y&jeR;F;bT zeSP*Q`@*{(dOtY#Hq7?^nEy7e1E=MBm^WZODTc!=VYDcbO|Lf?CY#FVhR<$ukT#z! 
z6sDgl1Q7$I*BPXkEr4*dSyHjZU>0Y&48(wSy1=xu$d#IB0pNqHpt5Y>(=NdA$ZVW2 zIiq#pVdzfbv|LV1hpZBwfQw?ls~@14(W{u`I_83}I2`r|XoCf#;k#p^;V~JF2ZB^b zWDzb_O{!KIjN%RFf8M-cqS<8P%HVO!;1$zkc3b1ITch;?tRAg8skQT{ZH8B7)wUAY z<<7Tyz1$^EXMUKhzK>_4n9*p|8;%B|tRxw-X2AaZp3z_^M3ZmPP;avOfB|#ckB!%H z>d7xlkv=VT66ONLL&d{pDuI+h>aTn+^}hNqE~j)|f62w=t4V#&)YE+M!8NOqLt$R;ed=V(&BdkE+%zUu*e2|WOh&KbEFp<3FTBOjQ zCpX;rFkblx;J@$8M-1M(cA}hQ+oFdr2vvvvjOq^JUy|!C_^jNZ z71pFMm#kwXB&{YK?nzgO96d9 znhQcPoU>(ZsU(eentx@bDCGuT&~ncF&15hH;w#sAbmyXRO-5db`(!MXOwUn++L-sL zxa_%NS~TC4T(y=t}1I*7Xv9 z7HY}b#P->8Q3sw@DLwUXot%8iEJC+bHB)e$ueT{=RBxgsh!Ob1p-)8jX68vxZHk!y zLf041kwvK$7B2k5Ns!v$)wQ!QDg3RnX4M;vnoaR{tG^(mxG9fQfk!E^VlCI8uPRy( zF%A9%*_@DrSPa}Ei0wqDv_9Fh3rUIPxnYRmi&JmWFXZJPg+7+Lz4Pw009IOU<6aLU zA3%EYo{PW?5@n&-P(|^|=TX-iO$jpn9zj-{qvKo*e@zpr7kCTY*8#X!lI8gKzAQuw zn73cW^i7z18lQjuDA0ra;*qr0Wn$73v?y;sMh?S~tTH&U11gX|SPE6!~{hmrgr)BMD-fX)gy|Gn%k>5a_ z*t3=Y^$SP=^}vFLKp=bc{6EoT%sv6HdZr~*B`b7BKmo`@CKr-2MUDwnSk{mSmw7*<{BVX1;{23V3J@E)J+B; zfrGG>;+&tTR(09`qC~bEPfx(Vf&9gQ>iRjzUqEo+zfcg0!7~Kp6kt_;u?jNJLOnnX z_JKzjDr!J22Td86a{$$Zdw;!PX`&L82zx4Gslc&{>dpeO;BO6Ms*f}~!fc`;3?1Cq zd}Is}b4n;G1+$RmNboad%8*Nsfj8vvkX%#bLs@8LCZ(1wSsJhB#uaUxh^Z89M*$YGX3rW5heNEJ#Q4xS9Jru^T zhao>?eJc!&rAn53YC@-}lbQr~2+65Rmw0|i=c(+cqM?ZZmHJsvN6I&ngqE zTDHjgsL{O=>f))Z%f5`~qR%TMza0G_)-6x4g7F~xDbc&E56jeZYV($5XjYYBiJpFB z*0^RbmnEH`l^~ixo`Asj5KFKif7W`_`66zsv@zh;I(T8yIabs9eqrf7+0#U?3%jxa z=ZdnW^HYx06(X2M@Y6u7j%5`y8_o_~KKKtIv?wO43~DKibExZJ>Yjb-F7Sli@1G*d zw&dR9R4*}#|M4)`2!4W*{|Q2Bd#9gHP93H?X0>T=I$tqAN3*~7e{lI>_{a1P?SK%@ zA~u2X_5(5C#{637LvtW4bpm{(y9*H(v@+;m(gV=HqAZ61L};#aC}oilL-Gtz03ak9 z80!J>I=Bnq@IFQdaGhW5eU~?|A3)#vixeox3U-U2t^&TZkSxGcg4(mdF1Wg8_66o` zh;-rBduDAYSCQfS^&Vt;0V})LBv|7jkaH4liGPxbmL!Ph<7CKS#;~90JSBVP50lHF zn=S0LvegRUES%Tl+)6-BA-Mvl6A~po*RC!gEeo4;)~S8t`Nkp-V;X4Xlh`NdQ$(b^ zNVNx$p}46&lff=jkBTzInwONU^j&k_h~k-NQ?>{IeMBv44sJJM5>QKU)lk-ZQG0ZI zb9=TI%{O@xxgn&)3q;Yx(M1_Wu7x>;pM^<8&)oWL8a!)x4%M7tvV&cZRj>7$DdG6P2@M$3P z(#9RnWAOd6ntyJt5FIF6X}MQR_wa9Bd7}jT{14xssGw* 
z>)y%#3i3ym=ixe&HP2QaRy2PdC4_y>UP|=wmL)Q^&cZU$GoSLVW^otPR;K5XI&$9@ z-#Xsj!x%^EZs+qd8?vY}&eGX3r!%56HZsLCb~H3xWu?U@K_|H;v8=VMEve0OfJuXy zghLCQ;_-v>85TjX3-LiNLzD+g3}K%Jn)i+!$lEZwe$q8mRI?H==MgdjY((RJtIr-< zm^J;@f|t!-n040xr(st^u8bp0$H57s?Q=T_y*>7z_krbu&=0;Ik>6{*6&Il*B36tF zfTZt7k&W;>Qyfw;0Tg|Ezw*AGCo|77xX z-nUzOM|o>`ZhL3FV&;i|j_oY+Qz(!z5Z+`yHrTF#U4XkGct>>)_CT8j5!vsX-_r{>3oi&E3=R+a4onVk4~!0^5rYw{5=~1~ORS8&j7^MvQJ`NU z<00puOky^U5Y?B~8`gu}syOQU)bFC7LD7aH4VV}fIp}$i9%Crhx3tOdQ1K;9NDG{i z#46DzJ&j`>?mL-gq<%W-wrBC^=@Am7o^u zYgKPb1%x1`o4|6^yYu{HnK`XzJ8%2$+;k9Bi#<;-9Cy8U(Pu4e`X5|N_P}EX$1)lq zYX15OC23VJo^2~5uLhH@xqn=z`Gl5u4>bIoY zLzfH=cnChWD9kcg5I)bL=|ZU@c`bn4eq}p!DCrZ5y|e|2YXmOiT#ck7Ii^Xmqu;JJI6baux0aV7kP#z8%m3JV z{6#mQfD{F_WYw;tCf~T$RcZ-K{U9SJ=XG<(bd;N!>6Dt9#z{)Y09&CdL78@N6|QY6 zl~^2(kVJ)%n~@<&ma-}a2NSgGh8YIK_c}lFG#HN1x@4drJCJ6=h)FZRz%!~v8!>Oq z%KAh6$^D>0#makW-V{7MEZX~xo75Z1&=HIXy@AV+Iw-a$P#E+V^IxwOu>WA z&N->3J?mU=3 zPv(kPphJ%>;;7R$(C0I!0vS|>>eGorms0mg0Zgq=zwRT@?E0j$OwohG7ph(FYnQ7j zX~X`qrhS=JdTnc6t!i=ESG(BozUw~leopvqltk)E#>Yk0Hl$q(oIgW72Mt@Jl-b3- zS6O(k(Q)CaRcKMAxJ;jQKJ`D$7sY0(IvS|Clq`6mYLJ|vrib92!^IGkUGCNKe!kQr z7s;R;e7`rMr6k$;$=0%AP7fHwa8j4m_`mx1e$JTyo$Lr|Zt2l)YinsqRmNBjVPy&~ zbpYf=r#^j|xmcID7Vtv~h)AF_)pYf0*ml4~TL1tLMK+vhUoxwpzOA-?)*V(0O&u0R zd3myXO>1}l5TqXQCwwDNitITG)RD06uojT24o!wO0U9#xsNn)b{{S+hfFlLnKhnR3 zhYbFJpsUCQVXlTSK0llO9{^-Po4+bH97qfqgpjKy<(9n9HqI!|I8g0)K&-r6SkQGr zQ1g{Wl>?!`unDP}+TDbiHuA_Z2xRXqq*9_NQ-`_Ao3f$aRW@{Q(Mb#6E;Y`1kpl|o z-s2rDe-L4)2n{nL2xyU^OR01;WTh+Vjg5_Th334G2u&Xx9Gui>T2*PlU8RI<)_8z6 zaWCL*st2VP0e4$;D73d%t~KN)yDP(lLa@<50%yIykfWplJOtaZ6tI$F$CM2BM(b1caS63xzb@lPh(a|h4J0!`W(8c}zVgkLAB~FBR3(=A^ zRQ3bPxX;yOg+Ay#=(Q}n@)LA}t10w@f2sbmyUy+`nR*57Koi)9Gic@^Vs|wmB53UN zB3hhAU9FGzw=lZ*cz@eNf)>&Zb+9l7;i(~jxM*GwR#yuR*TlpGFifMN$UH?E$3PM} zmyBI(!li2^?Sq*xeYCK!AV2{Iv~vETp>bf9UWbew)SF!5BQu}2W8{2IC$C#V2t!54 z2K4Z?(u#J+Xwm}uZ5dT$9Ay$VpoE3sH-x)VlL}B&MnxIlTWI4M7a6(H2@h7%qF->C zvqd$C6PB0Dng();%07IU;ItbzP6R=NpLlw@ZS(>e!{2H2ENPj9(cggU1a4lygBNzL 
z{}=z>Y<&4;=IE%Q(8oVl`&!crwIBU4hX2;L%)UMzh&*7f|LQs-=cnb|0PILVQ^k)6 z-wb8^3jW476ui4jJ`>IupeWmCQ2T^!l6*z^)cle8hm=pzXXrEd{)fyTosZ{*@q7p& zt8kZ``X^0sjsBB@{y@U2N#vBXO*#Du`k!EQf2R!_LW|-%+q>sf+M+q!db;aV1U?4v zs{r>&j^Nd+S5;L-4(V4`#)EaUmAQBCs5IAFqtCUy1>!9j4ElqvUs*5jcDqH+?Z(vH z<&}Q}VWTm1bF&P?63xQsb;L5VbAF?Q#35p7icL#X zi5R47)j*Vm3`C*)Dy(ibk6fdmUq)Rp0?k~Ez|gXDdeDx}Ho*egJVW+DFoWJ-dc2Q+ z(t>MWQFefp0TrQGAhT(E7p~^sg{xT7F{Hi=UvuxqSG)AO(0U`gC5&-tcWv?i{Fndo zU;fYHTJrGlFuAr2mgw@@iD`cEMWgY>7p8ea)Lt1``8dN{QMn@9=66s(EVUnP&(9M> zC6(&w0X7_Av1yu!6`WEa5RjZgVQp=#APhn@V^Gj3>iYFo)nUL!1JQJxp(tcDWZM*M z8nj;t2~$(DWqH}}&txVh&gpMFiqRx$I&_#Os*1RC6c!~z(~P7976+4LWPx*p&_OwJ z>(;@6FH0d7FvcPZn0ga%wpkk;ttoL!IeVPhUR_<4d7*Ja5G4rb=Q@EfRNy0gN{x(+ zP^TE5W=~I{VuA3HdvkLWbpPPs;K|7eeDQj{pZiM8J`8@qlu9-$%xATg4u^&g6*ru9 z&`7~a6Dzssmf zB@n`)W-vB?q}S`Rv5AiI&-OYJa)Fypa;(zwzY`thn6B@6x0*9Oyp0`$^}i2JAoiqG9`O3)RO`txe<|3SQ$9c z{R0Dk`A36r2o|FpiVE)6E+Omkw_udCG=n86@ z%b0;l7;NFBWZo6a)@Hdnnx98??AMLL5lhhx5R0%-;csZ`!-|a8*FU#tcPQhY;K?cSr|9pazyJAb&t|ac z*{tiRCxw{d?9*Ycwmu2Hl1Wk(eCG~$Hp3pjL1l955^q#^szOFdp;YT#!TJb*u4Q+qFM~S1mKL$xUgB}Wz$gTo5Jh}sxeBw8@O z^9}}H6bt!l*9trL?%mtL*REmcRXZz|t5uoah9dJ$DxUevBnT8$K1v^C3|vmGtgLV` z7%vP)UX-%BYz|Qa9$bk?f7I{X&z30BxueW_c$Ol8X1#2hK8So>>Gk^L zF#}UBsYhxZsYw&}i+i+ZpmAUIq@dD{zH1W&Xe&4z=coBG!suHFp=cJs5`?g}j?1MY z*p$Um*#!omvsOw&OIibh#IYF#-``V^IcHxuLO$5cfPmDEg#{%V9UU9bW`~DIqhW~$ z+l-gO$zS~97n^yiXLxwHhb}_*hM`z3PGXaBEQ4kHq{Nnp?5wgbh*`Jza~TY^Dm#$Z#C0)#C03ve+W95I@Sm861EQmgp2x}5R^LD?yd0CPLI^%WHm>mE#fvAi;-@$XR47hGA5)d)uq)>yotcVs(43ky>A0PZ_Sk4?p}c2E1>@49gK5I4ue& zAvlXc7h5Hoti*yd|E7l6y%Zt*9>9MD@S)RG>h#@fZAIhXvf!bGk3U{0VT;9rOWC8H zy}fXFYkTJ?%bo7+?VVae6W{*!x32~i2Td1?=p74ht?&;ZjQ#{dXv`z%%wWvN)EeL+ z4zhL#ui05sS97^sv1U4fG+pK?1V~OnWQ*qDP~94xM8GJh@?%D2vh!7cdJ*HJc!$Gb!I(8crmsB9Vej}gkPi4(7#}aK zTqo3TA=EEc>b%ca1;XD`tGdh)@xp<4iD-F{FZoJcXF&ywO?b=cWRU=mH4vL1sHcx}H`$C~~ zI$fxizje0SeZVi;GWyYsf8xUa+KWrhynYaBhDvUy9q! 
zMuQcgI7LC2_Q>{#k87w0Kpv+JTO^`%)VYuj?hfxDDIM)_jlezce!esOuOkc<;M1Ch zeog!aiI_sa7LI49Ef#bJdVKP#ueSXF%KFMi8se3ym#a%Z{pAB1O6~N;g9rDY=M3Mq zYu6-0an)*>40;b-kDlikh?3sl$dpKc3?e>$^OR_AMW*(5PvXE+tP`vO7fwhjkmvQW zZ~$Zp7%qoZ574Ws$QDPh7v{3_GKUGfAF7F0w2Pdl6;aOQ2#!yaBg`_@r8fO7+9VF~=~-d-u21)?NL z+&Fd(%hb@*rwQlgema{yp&|LPxtW!utU|8=PU1MbB2ycalWi;Tca33ZNz2&fGmZf4 zJmUuyA@A+mgM;7w=5KxS$?q8eQE5ek3>8kn0E&u!&%f6F!*WQq7Ku%UJfzZEU)=;^fi>*ghYy?*Hz=(h6^v5Q*YbpKf1ir$f@8dziqd3@80d-gt`AVLg)j=ZnyI^GW2R?btO%E#&0x? z8m(dC{A-2dEjZ4t|`}0*tgm} z{UPx5^tAUO#v)+jb6~3siJpAvU-@6+WR#w*5QpLl4uzn7X)RW|k zH4q#kOeWNd+hm(19oY53{hc^t;Zda;r+qg+`Z~C4$4wU~0^8e#qljtKH?Q9s84fx~ ziZM7mcH`E>^t49&?+kKYfz!C+ngi*f7EK2JB@=QCyn*Ggd#VxVM(%7Y1Q-gQ8fU0aF_okFHI>bWt zHd$zPi6=EWNLlW@_n(Vm^p}Xl3?odD7pxHq#o%UP;3okvVFzC;ot$jGI6OW+&Z{^u zFfb6LRo}ost+>19z`8Dn3{)@35 zgETb24}x==fAFP@?w(Um?BX66>+|^_O`SRfB}-@(;)7~ZX4co9o>Qpv@a4;w@KCTv zk}6GydX{$&H5${?lW$Puc(i4K*u^F$Xs85DV%`svTui}d{76lb;p1r1Tl9L1ZR6W@ zJ)1@Cb6k!SfJ8=Fr~=dv+IXT!PBPWS4?enp4`0|!0u+#J$GQUyuUu|uAT$uLDRZ25 z1ke*xp&ULjA*F!yL2UI>+2&=LmBp8P+iMW8s#KwSFDx|(7Mo0sOawYd7%lJeQ*amC z%Iw17^)7I&BfR_gB7xVt%u9D(wH>wclU!sMMRt=hMMn2N=dz<{RT|t>fL*^Q2#Hr- zN(`P9g#|ORi*INfF_atxZ{!}s+*8mWNr>7+pu!(53qlb&N(vT)PtZTd3`5=lq3GWv z{(o9Ymu{Nd`a|pHaB6FR5O4G;sMhphbr}sNY&*LX=5k+u-&6DIzCtANM<9@8G=Jd< zo%?<+HgDRc;FaJ8J)GGEDrXfEZc3^Ox+i1W_{_C_0*=t(W@gx2_Yd~5<#okQLROQJ zh#>qKK^U;Nd7suU=f`)krMWJWp6UX(T);c#w)q=;Wud}8oJ2EE5u5vOIoA(7?Bs^9 zG1+l^<}!WY&Qwix^544q10-_%hX6jz*}#Sm+J;AZD7ZoA7HI=P7A6ww6*((OX)ra= zk0+q=9TX;Mx-+7=duY=j{~5tUPT2;zA}t*BbCpBL&kff}-n*7rc#_dw!&lWaonpY; z%%qM_>*^{<$!1!v*8%#CbGUeiXgyEMS(+BDjMXY+M*x1G~m|Pm`0hD*5W=KMIjN!PyI-Khg^JH4j zU&0yu{EEHp1g>`()%C8`#m;4?)7n%_xk5RcElb6s1bX^#O=i}fz0%XfX^BD!OOiJm z4rk#B>6XllPE0~8*qd*^FWjDI>c3dSIKog7@`BG?wgJxp1D;iLxvF1P{R&57Ea>uD zypKP)dH-y8cef8p$mMb#hC+u5M}jPIDgf`2EvUaWBT^x)onz&;E+;^B zfwNtoZ;LLn&FCTp(Z!CGrnbw?OPu~znQG}EQ_aqN%yn4tC0d2M5l|7jMkJw?@9VQS z@|zpH1vkohC}-tLrEFUKey@Y2ptVoW0J9%MCZxY!Etk}?6Yc?fC=&tKW0cziHf>(1 
zp=nwcHjAd;WjD*2%}wQ69iGsu#bOnKY}IuG(JU0sLem&Gs+Drh)N9}wPy&P_1Wth+ z$rgrTbnwvXvWJ2JDdcuRA?`Z#gz=rM0qy}}g;zI?Zj$(X6rlhM(FGPa&d$yn*a=3s z6BohIEs}JUVd6N2O+&V=Fc59@*VS({F?R3%@*yqkw#6h|Sa z1*8|{bhhTY9>wT3;Z6rUe|{euW2g?@_OgCi2d#503@PkQ%t(j&NSy);^5bclpeUeq-iN!hSrL{M1=Fm+Kq`Jt>;u%== zWN{WRp^hAGyykEbVW@~@Fa?FFPLcl2`=JbTpNv5-AsD68vuAF2mO1Dp&yHbumI)rg zvv1rN=ZaMbf7hX0zrMK0UBAAvv~>3ig(3gDNXwY~JLcicOnURnhlean}r~I>4-@gcb{~8(DA$nXZ zt681z1tHjPtH{xcH~`cWwwdbAh7@qKW}^flw4KBB{t6YPApVgiv7xF4nE(@`jN=Uj6dRFJBZ)_teee zSy314HptJ{YPALppMoeTazya?qJXq3UQ0a(J}3B64*g_*74E5R9UrTZ{WJ}|UX@u3 zM_X8&xctAJiHW%xLW=rJq&zvkWou#F_^6R&EPTFjD}o!CJq znGEbCJ39*>GyIR4nQ_lj+cUez%*@R9@y^cd4u-*T5;I%2n57o<|5pM#@?_xnDk-bg z>MpKVuipE;SJ+y?@( zuX8<3o<5yicKy23+F$4z^&RSJZgzgRrJy-cfvk>6?jJvR@OabQ9G7cljlXh*)ZegI zV<}J{tM&fn>qB9B|HRIq zwpUU;fm6X1aWuNMv9?xgWr#8PUYIJv8;-5rSTeQ0wliit4W2#iZft4NIfM%^#V5Za zOnab2yZm%3odvYr1W?O_k1hjm6ejO#yxL>sBV08T3(J#JpkmV#6K#aEvxSGo z62rBEymz+TTb!P}N^V5>8{`I&?YB)2#gA53$hioAj+`S$droW1PP0Y-Ec!PUNb{=(elBS%tYKF zesuFAmOwMtW*d9Z#_qvmd(PdSmC>Y&OQEbs8qn>5p>>o3rEQgT>c~!qKD#bh)|j1+ zXH9UQJ?jzpt~J3sIeBEM6Njy$-m=xvX65HC2Hiboe)#axG+<)Wm&{-JwZHb)e&rIr zpDh-F7#AUgj1}t<<;HeVgv|8DjW_-Ai3x#%nWRGe$-nz||L%!^@613JPlL-G@d^>; z+%V)vg~GXWZ+_NFmvEE=4oBc@x&O@9zIL|%V=G-|d^~gN6i+2pRVB(N5~og8*D!Y0 zs-Lyeb!;qVhuORZgv@5!d~knplh~d-&X%yol(IG-#+gZI0DCRn$@I zoubgJwKh`UjV9vj)6?m+cVx^+)YH>bLjg&W0z>Hb_5%7^AyYYci7 zw8o%UZnj3dWS84G>K-@rcKg^+?kC*LFbX2SsQSVSFQ`RqRkW~xQXCZDwB&N9PTklm za;<{&80XIqIT;Fd$S6)u7O!TrS92&p4idm%s|$L)mNzVZe>9425L+2{VV{R&6Jyn6 zl27N(OxPe$gFtF6k40rVm&y}e$4;wbfasFk?xB{QRDKzqvKEV#!_6g78|s)#K?Z;O zexhR~MH2UJnoT_6`CP7LAz#rWE-+!cSW;jpWf=yI3d*t)=A$U2M!L&paatFavUm#J zIcy=>rw^?T3#pWt2apPxk)#>uQp&Lyv$J2$w~V-k+-|93+Qp-2C|kW$ynNn$WWnV= zH&e{ljtsl3^|}?wD6$+xVUSI36@}YHAtQob!CVdVto=R%ef~nHAAz%o#xlint=dxT z_HtzgxAZVWat7(3RO4i)J1o0TW0QK?En#zeMKfVV>*?!p*~~)33aYoBS4JT{D3bH% z=fZqpH(QTzqTL&opFBqYEIfXy(fjw0d-C!iAtOa_*u`81*=BOhA@t5WQDG2GHz?#b z-}`U>?Z3UZnZqjzsYJL6QRdyOb#ASdh%$n98#a+L+EH^k8DXa!VoT_XKVYFnx%xu< 
zN3%}q!<_@)aLWCq0?)s9dviW9E`-Ojj;K~jqQpTl|R+h z4ZXp>fH~q)y#4)|x8Htyy{wEp+ZQ?TL4qs^To`7RKEf=}@87@M?2uy$cjdVh?k2ql zwP9MiR}=>arJ}gz>85bv#Dq9DX4E-wWL(`iI2ao%ErDxWDrpw0Ro9LY7-*diHNu8G~6{QU@DbNRaBpkL=X4lU^n-+*4IDFc(XqqJJ{db z+1glN-%pQvy}n>i@4z5JlzfI&=L_EcfX#8Z6J1@|*-h;xOIwOMbaujH6F$q-v!8dk zJ+8sA@$rclUsv+^bZTRLb#>|8pDB~iWdl0c;Tokoaq05;fW2BRHi+~jq=osVr7MFG z0r|Z4%jV_UOK!{K)r=`D2sXEW0Hf{eUth{b1dR4an=Nj;2Wj=Qb@~NLU-+q^yZl%# zH&%Mb`#s;|d8Z`Y9r`Kl@AwzMZ2kLE*}2#nD$rfA7K|Y_|wYWox#DK`^rxbvbX-y5q5GMZ@Ddtix$}H zI;nHj^Gek36Qk(lv#gshZf#xstRZhw z)s+?U-|00#If4B84fy4^G_jk73Sd!YtIOu``PSDr*S0^p{b2LSmM(C0(2fQtcqTw$ zCq0V33-)EZ0!v%7&Fhj$2D_TP5H{I7-q8Nd$B$OC^B|~U`<>-1v5n!KF&oK3C8=Gg z9!3+`D3_|agY9jf&(4PiFP;xLO}wEv-3TgQ+JddjX0C36to_WO1&!RVx_maNCi~m~ zyxR&pTbb>&1a1fc>lR1D_UR#;phsb&eoz%`gGVy@R|Z=girYnaDssHQ2z@JX)a6Ma zkckPhM%>ubyXhL8tp=V}l-z?vC)@kC-s+%JI1P#~bf$KDO`$vf}7^LX#oSNGO% zv6_DM)wE`5!s1Ofg{yIVE#ka560*R``{G46$wkppZujx-)-gzk)Y7BHN4sV=*BH`qx>%Ufcx)51bISBIsUI91 zEH8)Q1CGV{9yJC8{I04#c;GoT<#(&qS1(noK40~gDBjW}4DeT=RSSbOed(&t=X>d; zdi~O+Fn{S%z5ZEf^Uubx``c0}_m2c_3T!ov{)gJ-3+4Y1Rqh6U1TvrZ5@*XheSJIb zmz4*1gqPj5i;4F%DvDu>BC$_QGf`ym*jL0)GHV7~U*GP2wrXOyzaoNy3v(m8v(?wH zHqszFyW87)_((x24Zt5^2&Mg+6^Oq?JXYkHdfrbOhDLcKf}Vc!RC#xIWXLJxAu&Hp zQ<^@+MV6|;UZ7bdCy+NjyWI!Lt3%di$MJm>Eb36eT&>k@c86GJ7{s*R^rEL)BwmyN zr;(54JU)yulY4b_gu&<*FwDq5)5ve0XM0yR1H|~)zGpcont#2S{PR!Noa)-Kt!^)q z$?W{Yr-Olwjlkg2Kiq*##`S~F#Z`}IbLs*qO}4 zL?V$YNdqlm$-c%~v>$XJ^B1UtDwsf({eaB$yLTo@SXWF7i@aQW9*JZdU!7 z>h)6T%$dgnx0)_#en}&LDop;^yyehW-LP05KCJ0uXYx!>{Th-We?3h8@_c8ve~fL$ z4DqaO_YKFx^w1YRk^l^@7xP0KqDuN>X3~7iKFH>BM=s=v55rD-x^0Bd4y0-ROn`<86t&kmCdD_T>aOE4cMYWQU%_nKk z-d@kKV-cPw^?F#nu}^|nD1u}kLV$rRBfJSL3T`O%+*ZP@gff)bXgTOkPtT6lqnE0p z-3?j1+b&j1x<2d>bxdzvbPNx_c_jB`9{+rh7%4SfYGFx|y5W9SU_^^-$z8`JSWfG2 z`W91(I2bzclF$nFxa!*=@aR^};}~+w45^<3m|_?x{mH?Qxr0=8ASc(e5+iYKIPUpw zB}^6~`~q1ZGXKbSL%RL``|>3-F<&Axt$y*NUwQ|hl^A)~*z4U3 z9QJO@W=J^A_}6-W6z@+Co|GVU(%1?N46t-q3GfW%jsw7}rPan_>3#CS+i$C#L@(86 
zj-~51@~ljW)rTvhI%40B|6q7cq=ePvNCP*;C>eH2iB|An%P}S<@Esxp#un5d<9QUT zS<&*39%=6MsZ$d{^lWeEb9%Nk%VL8`xepU^mmNsb-)SpI5nOBuQ+yE%x+JO-(X72-lRvE<&Zcp9bHT z*&nsQ8;NBf-@E9}+;Q6;)afCT|V%$&^BlYOf zxasuiiPL5RA|-}RC?b!RRif}+U9;YW5>5}TDYGv`_MxU#k~y;QBKEMsdcGc%b^vJ9Io@#0|1w$bGj1ln$P z7VtLbbXAfQqa?kw#Jm?yBrDZ;*e+Z80GW(2jBPD~S>zdu3R7ri&I;%+LuW!Q5#|quhYz$C;`^v1#)45q#q5sDCM!SNuIOv7r?bCEHA32?g}H|3lEID~d(Icgdj z84CG4zTR`i>ts&(<&Bk<#*4q~m%ZrbB*m-<95IuD__PP8;(~X&S*i)N+yI+CgwmFj zqBV=G7Tgfq-v!Phn@n4Q8#hc+pm4iD%lf>aPff)ZY`UU&$p@ixx#S1Rm%gNg1>H=N z$*`zDeym#ukNs#eyNA(!NIrJcgf>-r7Y58_0I2)>?V}eEa8DNdF-7MfpLui`A+?Ak zHLWzIu!(Jd_ld(n3XzuO>6rB^U%CFmg)5`zAdvi|Y4j^!`HFRKdFcth;U2B-F$*Tm zWwqAt?lCKP>C0c!Z#4rG-ey`Ix`T{*+;BfI;zu)Grr!xmn-+z>7C=HMO)a5UH`3J9knkm4T z6OiWqQ|D)1xOR<`jA9!6+sc!>_g&=EOazYo6k_5Ln|Ha~AL5Jg_(AkAx(MM5_dzdg zKBp1J=56|mmIqHVswhf|%|4*Bt=DgPl0nLl&E0#@p2a;KY&H}>m!7v5fb@m!N8Z_< zEHB$^%i=`(?QbO}#Ol=cI~t`l{3&|^cLzsnfBMwE`;V4}f}5Mcq2+(H3z^JrfB&xg zhg^@>yxz6Pt{-wY)9U7o2}>hz%%e2PKPOk;YjK?#<2s*VQY;UBkK%{^MVXQo@7XMa zx8o7g{gg~3AWUdVV#s$jy0*Y-V$(BOu2)V%ARJa+qS*N~7c6lTLQ|OVBSAB9yX8tO z0Zz1BWMek|fNkz{h`Sh%5g~k7Xv86nh+wGoU@yM4w6(ppy`9NGO93w|PM5>$CEJ4| z+pxWtRi#(l*hBz`D&>V%SAcT3ZcVnYNy*nQH6dT_25A^m7 z;uFR&g@b)X^1*&P1!ApF-EY9~;vVD_GvtS{#f<=hg zQw#O<5@_+G4I4jyzEl7TO6NpT$RQLfRB$I#hU8_+tZ|1_DoJj33581IAPLk|1)z2+ z$|jjqD%onSVMO}s>F?ga6kFIhsHou3u_z^p#XpG^;?fr!^869kfQa?7HGD2e{d8lGUbUjl)Fh5PKFnG~CO6^R*nrw<*zTsSd@C9 z<#99;3-=VW+$d*3d!jqhh4@$`;zl;zv z?XsHhJ;*jK5{9itK5zJ-BlViN-Hkx6*F@Q&4ba@A*nW-&P9{_>IvL2^7qH>Z+HU!S7)j4i{+9(xgE`+2MgCcMRWc+MJ1}=3 z;AMuDRtZVVUO%(+8nV$8%*pU;{cxS>st?eTW^`=@gNq|v+wZfhv&$!~tq_$b&1d0$ zbMlt#-6ZQ?@$+s zc<^w)Tw`XtRUR@lM?){>wwqo!-I(+J4o6tIa%E>FY9NGZ4Q|0IIMrf$%Ee_sOb&>t zZ#Wto8}s#g0#5jIh2X`la!7}P8hTN`kizyCyQy5*^5B6<;#uJ(nWx7+gGk7f%Y$Gl zMb|chK2pl>FM~WK3xy0UV{(S*f$HB`E$p=%nL&SAZd8qkn-fg|=6}DixX842RYqaM z)?2#`H&(Av7##HALo`V9oQ?SA<^dau4Z@tz zIZ2A?oQV_HK5~fb?WS(flxLY)-1Hb4%LzqA6V`AIVFm;G++aGnUi_i)r^AwZ(DG2QZ`gp>Q6nLIM z{=-Nu+TDJR(b#o{GGsLN2pc04ibx1Qm|3%GZ}OXTprN%jX8&K?AJ94LR$-9E6oimf 
z>>NmH_u>6iJ7iO-t@l5~h27;V=k=L;*fRf#0~+F?M<2UKo0|fdsyu4 zW6Jk8&qYoC;-2iy8>K=a1sYr>s>f#-)Ziox8LQRl^GcGDN+x5;T+U)iX>ZyjWFcUs z!qbqh)Zvr2S_efEZJ-KbEXHImEotZPMd^PBA>^e_>CsT}WZfKu9Mf;cs_)0_@|j60 zVMZ_^a#U!_~JZ6Q_fV38i#8It= zI<=yd`h6CWVVY|^rF<2lm>LI*b_`5T!~lTY1%D-;K2yVQ1S!ueShLL%1?9)@VERzm zLZwoVNR$|qP=2nfrhkJ_^4FPnwoXk2Ns1m;Brg*&gXT$Y2p?TiEp{Lwh=`3kVGXQE z2BwM%?;{SQu)S&6jaC3}m|c8=3+=z7{-4y_^Vd4VyX%bx z;ZY!-vcd_}D5VmKeTXh{W!_>d*-Mp@4h*>=iYA-2(I|b+M*6g|(wdL25=vfV^Rd%% zQYKS{mz&J~J_>U8FQ^7pXW1GU`S!f&W&kkE~*WNHM z1CEXj;*R`m@BPWPef_oPmjP>ZDnqQjY=N}8T-Feik6HO_+KOO76a^W7ZFZ~n@j?nH zb5PKgPr=zsyTL$<5dV{tb8SQD9d5<;nr%d$q0m{kNt5T2ciNZ2By77A|w)>mu*&6G~N zR2hNixg&DZs>h!ol>9M5h|;MCnnp33&`5-faHV275}?G!EE`CMSvEAUZ6wRCKVBz= zBXvsZk}O6PQI_h2Hc*jR>nY^wRxfU$;|qC^4|6`gUzdak=B!!!)RqZ;QpuYYR$kA8Cdn|!@soLMk^ zdi(Z#V*7?*WI!F>H~xp)u$)a+5E`7#R(^gn^?Xt@m9c<^xwtOOAKR5o3=-1AjsoCF zqsENGRLm}wFb`7&A_pr6+Mls+{2B|SgVs(E}piRag*EUQ*Bl&oX2P#YHq66YLyzLp-^4xro!ji2pI6(VTE}?agyTB z)|-S6bGgS)-}odRWmW|{oo4(QwRrtuD@S-_q}XgQpq1s%!Abl8^8F!#&RyH6py zv!6jcXFnG`{85zU#|R-*6oDc(V=@^%K9T5&t(~1BWMC01C06u-MPN>53LJB!TW8kE z<|^SVtoJh;@d)3jBR6%sNX)pU5{8kcke-eRA`whNDpwa&Ur$fKrYOzAH46zKb~+$9MZ2L2>%@%#oX-kDUAP@$^6 zL_+?Iys_bMu&DhRIS|<0Wl=lE=vkk^hBP<>|HKUk`$yC;DTGD;4*S=ABG@db3%T}6 zozz~@Oj}zHM+G#k!2Gq`yh+~rjzH*lG*ck3v(o^2lhPBGkxJ`LVzbSeS}(FBG^O<- zxp{NW)OwGl@W0^Q(~RabYTSPJ$A28c)HxF2zVwyXu9JvnKT4=m4^un2xjAy(_!GkH zciwt?RR=+_9vMaO$g+oh4!aYH!8oLdNYvCjWtFpA z@I-AbXCLj9BF@{lZ@%|osnQTYK$NR5UY?oxX1CovS0u2z=Rmu(ZktWQVKvsM&o{?m zW2Vu=!@1V)0-=b6%#*;}Ji*;AITnQyg4pJ$$)pj}+_9983h=Vi#aHk{$-Us8p_uq` zG#Uu7sPT!x(B7W`Um1o}VtpNOsnRp@)EV|xe{9?L7uZ{Btu{T4WA}QOmn|0UOSL)f zTl}A_e@Xii|C{Q+ruMhFfB5DX8-KL%N9okmSIK|FzrToo6;d%ghKHY=6a?+#NMUNz zJ3a!MZDU-x-D#Dv_WW~y!R!6P`02B!U-kK3WuL)EkAj-UGq(CQIV&%n|9CO@+hwOHcN;wotCKV-@YuD^*=L}|E(EV^R z6k60ctb}0>M0Ni8`LmV{F}1cB7DUfZy!TD=9BcGY5X9ByiUa&mdujV z8$w}Eq|Qp7O2iIYE>Qg*7Zy2Xa*_y~A%r|((GwI5PSBjJ%DzCb7ilAhoxSJ*o_q3y zY{KhKr3lugoQmyjwp0Id$NN4jdymf^7+^dIJW{L&ePUftLydHJxV?`on^m#VLXn3> 
z0JDbk^9Fb)-sU8Cdict%&f9uKrQzF=?fUbCLI{-Iu< zMIt#c2yw!3nu!vy4T8zx@n~J`K1TqVKxV&WZH{zsW5L0e6^tx3F>C^r+%q$7ayu>! zb5DQq7x`gxmLa)`4VxDGocdrZU4@lGEsev7PqZbq2f|XoULfXlG%Q5ZW>V0c4X-zs zGnd!P=3LI}Z8%OlG-okcuP2KZk~6t@-et;RcsMKZnAubn-D1^bj>RkKt+YnExDDBS zbJKA)EnNn)A&!qoPxaEW_Ggauq0AD;=Efwfp^~iK@j2Hf0X&bu)RGiZaseQy~jy&0bO4pDlB`{Ikjf;^aHEh?=jVCC+7^+n@)EYwG))QUTjiw z1C#9W+=*4gXc%nOXdJB?m)cfE0k_xJnm>oJMB2ePeG4nrc79GcNXB;)VIi>_PaZ^+ zB+7|`ZYAdfj~?BD@`Ro52Ds^yXA3Tbq+p;o?CK2!C8)}}s?o8yXyuzu#130C%jb1F z^3BapGxxb5MWK2JJEf8Z%HV{nQhHhyd(&nwZCKG5bX2&LZAdHiEr-oh8&_;Wjx3xn2`PbpcTW} zN{i5{6{u!68G4m7nR}VujWa|c;^AepYVQkr>~1$XZj@7NPoCa}y69ev`p=$ArSmmW zbue^!@2SDQzO^ip%hnZGfhcv&KGhe1{HU~t=MN1k@S3+)sx@S{Yv_4xCbefL0Sjkn zWD-;K#HDlz8J+egKK5JDOxJAGT*Pl(na%!ANs(;#aP(65{j$9g1A84GF9W7QOremGFpS{x`@C5o(JIgyM zZJw(Van4j&y|r36>lgjZNvnyJAQ2(fxz4T(k&v+#7ini)q`l2WZf+iKAnY9;?y%3p z%}uH~IAU-nhd#ER2hR@m7LBJ}!v zJ?zsrFksXRX@pF^Sj=bGRiSQZD)(R^&vAlGDa?^M>zVTrC&yz~8;kDug!~Q@XAo9a z!$_nM42#8Jp9$!|q@i;N!&XJH46~~tDT}hYUBO_bl!+BmhtUt;zkNI6EbTnnK4{o% z3lF!;4NDzOq&?4e8NFlqwYH^uy#d(yq8eUo(mj!}fsh~E=W62q3^&hN@#>-Q!a&YTE~*(|kKsP@f| z|LVpXUnm$ho56lP>BA`h)I3Yizr@LXU}m-q(njJ@GRNj}w;z~RSzCW$bM)xjc~kz| z&g%IupRa0v;Thh1V7tSccTQde50Ok~5*7`-qcG&zTd8SsK3_1oTuMQU@UgtbJ9qSk zgT3LlJ6w=_|0+70pEzHZfPOOa%gh%?1#JUm?Vwm-B8V3Ko)^Va?S{+XHn{oA+UtwXqtAEJRd#BM7`B25PZFv3iL zeefN=DXo3<(Hhdiw?OpG6HmI`3(@F;yP3s2eAEF*H5|jYqcq(ex>ow&gN4G?tBUEg z7AEE}Q6UV*(%0DDrgTRO^Ln9B4O8qJj&pFd<_)0n4vk1*BF%T5%6RnbOvhi6qUglQ z#6@}{L5tg)n_Dr?o=Dg=nZh_H%adwE!LHm*coU^fpt#RuDnkSqi`A*BjzjN`6Y>K@ zRp(}zi=a!Fv)PDrAK`(`8s?+X|NNh|E(G4Vy0M{}D-7zD2a+ib*`OerL(tc_V3)}` zk%qmnupnt~m<568Wfn>xk~h{%9GGJmz~rSqun}u(+Bh4GD^2S{r>)U&;8Q8AY=FVo z$Oi)XHC(J^1A#1(QY6tN6RxJ~`G^xpnHnH-=g<3u;x0faKHtZzHn9&N6~qC=#!2}D zyaKxh5Q1)ZkbSzm%gb$goMrSl+os34+&k|8&~)$KgG^ZEMZ>668^m_@{P~ET;~^9| z+}jNXJQf)o{Wp8v?!?*(LcCImv(MFp+r3e+_aQiqu*Gn)D|=yMX^C{m>BIMKf;QVho3mvrwlZ5;**ev0`sT6CB(u{yG4l>>mpli|#uH;8#bmbc-W>?XKG$ripyQ$+}P?_MM zBSZjs92%-2JbrAqg9GTcyYEQsMn=MPWMt0T60tEPEQ?2yJBDq&e}B#jA)7%dnrfr3 
z@8IBnLt5wBGo_Q(ulY4$?$`Vp2;aiO*RQ?y>en?l3=m7X{QA1x&SJIEsFun{Y5)Dd zALjo4-zQ%*{+RJ~?(JV{O5fZNJl754a;>fP^hBeiRwEp*wXC2BMLd=c9_9Ae=}*1J zWPM@!+E3w|=B?Ih)k2}2Dzg;xrmS%XQpa{~qa7QCR@>GpzwoV}uVk)V$#i6_ z&xma8tp?TW*IxcYeROegRI@XYH@KbV-~Rrik<`?NV z0%x%f{8{yTt~BDIb7E-3zMen!mXCPU+p&N9cG&#Rzm08-jBK!|c{@X>P^{IQ&XYsQ z`D53^=GT7I;kb}ov|?p`$*RrG4xx%@EW@4>&73Kf1%li zx;&pGJc!pEi?y{y*-!;7)*8yrcT%Ws$UhREPnYXzX<%*9Q}zef04XF{)XnIgbk%N z45cWB5{49wVkl|dqe2!4|L!~QX0z>4QEZM1*&wx7UwifP-c9x#lPW2GUYDb=o5fSQPrQS+8lL0H2L`q@=ha|g(K@w7wx+C$h2T|U zwH|wvXY`O7Mi@+87@za%!1A)K)<_KW#twTmjdI*KRq_L6UhA?*XwSse z)i7OMowv67xkLOqGxA)^HL8_1m(dL@qX$?9ENb3XYoT&Q=QB%&=56Ki_P8D^*!RQgnlMYZ&CPlH7AK6RH^+Qqo9R)3+wx(F zljX3WCSuv#RvT6_{tw)-j&0C{6Z(B3?8Sd%)aq8_Ai2u%8??kQ}e~LsjcaE`7 z`Oex?V(e47lgY39bzzFgz4rR`*GPoC!Jao5^F%s}4#$|MHt!T66p@fulV?s(Cu4UX zZyg-&uid|S_tE-JG@UDE4_6i*FYg|fnT_g$<-=U11ZC##@}v8YcjD>9;nv#I+c(~S z|EBh8i-yNy$xMtL*Pcm1znMrLUqja!Hw3t1_p_TJH^k(mwG4tCA7q}8$kxy?RPldkM!n%AqiUfPM3J96hcgd!4h?acX1 zN?+SfWb*N~#Rrd`Z0sE5D)kb8EE~J=bioi5T1Xtk;qHi-9WJNpc(8Ea;a)Oo#cV29 zRcs?>K`&$u_Rx+s&d^hbduz*2kZUQI*j`&%xPR-`?aT%38f&#KwQ%=!@|o*=&7fR! 
zp2Pjnh0`PbOm{reRv!EC#nZm_9x0Wv`wRAfE?iq%>ivQ5pMXEm@u2{Oi5>_qO;(## zfTSGFRw|V%rF85NB1gEo+1h-1XJ=w~bmzgs%Erd##^zo!GXhJrH1@)|g3dALgv_qM zWU~1Kez!N!+uz^YHvl!lHLTIh?(X!kAF2`W;3-_68umT+`s}G8zrV>ZFfYq+I?VHY zVdQWNt{!&cWqc{MuS>Wt9&WSiM3K2iIN4K9o8!Tg2lp11cMcMTaP=P0S=o*CK6=Jn?r@gqk=9$!4T_O-9s{r-{Du)YJWxVF2$ zJ$C)&7hZnll@~8xnz?l8+{D=UTug-Jzs7pR`8@ltQU@3K8Regd3Z~!5a%dNS%T$lp{FMnJKTC2IHMV=`CL|#WMVWSUX&8aEY=S;clWlo_Y*~GVnAW1T5kwau~62_DNquqk~a_h zv3M+=f{9B8Xu}dTSJ|q>+$lh^!cY!WSL07Iffm41p>irMX!|0qoY=knushZ zSg$3K$-(`24SO8qjYmU*P=dUu1gtfRktihW&9&qvL>Kfde zZ$krha0ovcP*fTE;mV55CiA3GuN4!~DD+a>8|yH}e!770@b1s-pBkIk-_l+!$99(5 z7^Ds!X{C8xuC}JfXs@FUTk1fVtRY-aH4#;vHTZY5ZL?-Wm&EvQV84wLF4k?HxBq zv|K*9eqAW{1)Vn4?jJopKIn5=MGos#pufkbN*wsSGO@auUbX~uMn*TeY__GPI2y$2 zQ1omvldsJVi*|1i=H8VWRV>b)!O=daNmNv~A5{GO*~zo%Z0amH4J_?$y# z^;+YlcNJZZwFO*q=m9&+ghlUesiYKzjugv<vlkLcG0hB#eZ63kYBa^}o zJI0Z$Zs({CB)i9})xNP;baCKSJGG%bRLV%3R_>nmd+Ih=jas3IKXAcK*yjkHunXBx74o){@oimc!LM znvBLXd!tTMqb!eIF*9Z&Qz?5;phkM<>60f30CoGgMzLf_oJ(@}or1wDp|dlmLiUBl z@BI8P-N}~1G-wO^9_-|&LbMoPe(=DM?L#lVaQSr5-q_P#&Zc40luE3uF$Ka#qNEeE zD=<8|aO?dK>a|8gy7A=kZvOE*Z&mE4&zu{qZ^dA{yp`op0*8RSMVNtFETjf{P^;;c zie9f*i`k#}zF~`O@p{5EQw{qro*r9?72%iR(u}!q2><^dt-v3orz5dzOJuCq;F#^& z>mPlT%LRk4zm6uV5#i5S7t$pv^sTov>ahH2()LpG7xCs_W^|)2!*S=Mcu@iq z;Va6_PJeJ_5P!J}Kv+B5eh;Z-)^Hrxdb*fmPRW-(TEX8^rD(+)eY|*x`N1H?0S239 z#~^N343ooZ)QP0jbNe3lQmOG)g8e3KIw3r$N@ieEOy%U(fp$#? 
ziJUp_rb*UTIp~6u(MPwI(RcA;L$Rrr4{k&aB{V)UIXTjAQ7|xjr-B$X7@kq&oundj zX5`ehYhEvq6I0i(Uq93D7HVK9O4$ll=xWvAnbmT&n!vcO5GU z@e!wyK_(f)IXZ3_yrKOC&(pm!kwYkANFtTJr%#DN7=@r=vl};UBnyuoi7+wdU#{1Y zQqx^y(>V+>fQlO#2zIF7?E(>+ldT5F64{m2Y|Rdwti6_9TghhYHRk9MPclc3C}}dF*;Zx0eufgBlKp?x-hs6@@e{ z%3EG}`g%{6zLR>h2EE;7=LHJASe-jSL+}UuiIQt(RMnyGqS>3hX^DupkQt zmEcKB_v)JSsIWD?UCxddZbU--<>jQ|%Qs1P(;GglU zAxA!1;z*3rSfNxZ6fKq_i+F_6Z{o2(LrBMu;^bhBj91 z9%lW`B53@fT|ESD?*zsm0j*@tt<9hC1Hgo}0825UEZ*tHCHfBz{44^O2>>^cwT=oA+JLB^J`!67V9rp2|M$+e-!Vg9&92L>*QZBUOwE@ zC`F&%_(dGb@QXK|MoW#xJ#fCj<*hwkymwDKWsr>xT?b7zAb$YKEEJel$)KP>)Tosq zvMARKSW+1^ElhqyBY!hY`}@N^9+H34Z1qd_w%6vCu1OWbHjTNoc))kZ7^f-JZH zYFM3FoC{OPHF-e*So7%Wjcz|WnmRG@^rO#rOSkkGZF`ui`87B!(TB zR0W0*Uw!y4%b0$WR6C*T0S+K+9hjKl7P+2jbGf%{n%3qlNRAw*$IgVa8i$7#pK8QP zDpgByJcC4u&son(*_u;6A;S&ZH_7Jd#?z;b;=-;{Qg#-!`DT%O%KPU1Qje;I?Uc~N zyw6uKd1=8^Fg$pI6+2sZO3qqVZui1#XxZz7#Oon#;?fQ+lHhT`;W7fJ6ns~Z9;4W@EQ+?({gmaR!9ye)uyX*??MkdpTWhN%X>ak3$z9%FE!5!1@ z#FUl8N_IuxUWt(ySs`29RzG|q>2gPiS>u?ip*Jb4^bzN0c||FgBc!Hr=r!C&{~@06 zB0Sii%k^_AgnlYVtC@Ime9%ra%ub5hhDPIu6{^h%l0mp9hRqnfVa5mE(^V9B!ek%>_G0COi6aBr;`6Dlz zzhMygg#kzMPDbr#K5A4_*v2jZkXL*9cH*2pZNKQqxU|18khz<3u-j@M9_wp8W>32= zrthWg&Wz)NHaI}Ic4%(2g|=hS<1kQ#)uZTeh&q*^X)%RHMnWcbts9cT;y~-?YMR|M z7gzU6cn0^6o@uq=ZzdFxkW0Z-D#-DY<>9SG2yT6o;8y%jhYeN6vw9_aI6OJ1=uz-E zk2iLcd2nf|Tuqzva->|yt-}q`(`1cz_yazt!)4|oo>~JtF?K#&pM@(VlZhli2aWkl zHASgqa(eaR#bHzV-~oKv-P+;A26Jje1x`}c`w!Q10`o3@woho19j;zx*~qFbbP7#= zs?TL6>7CWhWWLgfc#LYX5L-s6qQwTR68n4H4pp2#mW8kr493iL-fXV%W|dXPhC!0a zPEYx{>JHx9sdBE#scfdoX;wC0SR|Aq4I|ga&rK&{xyGDre?KK! 
zeUq$}DMn00F$55n{e6h(TrfROrFwe6pe?bo*BF+4ruOLed+&YtBwjG!Q#lsRfS4ml z7R)Ztc{oaAR>xD9E?yWmSF@`NlHDbiH3*Hw+};NB61NH2s~#BuW0n;y7F{R2#cL7- zpHC31-u}}N8%+-M1)uSe{6fb^GDb0fuy+aH2otBLd!G*)Yht-3wfS5 zBzA~r*)~fZjyL#hHcgJtLH)Iakh2bU3fk!Kkg86NjUx=WKxb0%vooV|Et5omA5~R7 z%;pa_DOFX?e!oH_N%625fFVl^Ed-fR)7jgEgBf2}+05|f?tbt=o!r*WuCFsQnC)HY zM<7FHm6F-%QcpI^yeV{Q`pm_dS1tqs;{&~umzn8|X6d(*S~-*4-^Wm>g;Ae~zr3@s za1X7voG4Y$&Xn%&7o7kJhDrN;$g->7~;)l`enm*`XzzP%*-8e@7CipL^KQpF&bF2 z6^mkhp}ugJ<3oFa-4@FHcjMXLgY^6DCX3P_<>;O#U?$9_zrhnZ5Q;~O#Hrd%VR!o{ zy)F>i`DyO5-)nb(f+LF9aYG_|m|(LeQT6+SUMrJ5!n#am$55^99)iQh^sK=dn^Lb6 z(H0m5S|T7hBuV6re024}14?UIqru7c=1+FXfpv}6vz?!`%VIgfjAG)3L7_K*8mJd+ z28LNf6s2-}3zR2e7+kel2@2IStnyxrHE%-UQ#S`(vh9ATG#8J_=Dt&tHy z3^O~CFfrx^K&2~0!~pFH^mqu9+$4#EdG4zpY(=*Z>hJ|pNaiDizQI{t*0BFUjKE3! zITw5MeuB6!oIB$o@rMtzH<=jFXndou-e`7tDwC2Oy{KWYV+&Q=PL%9+M-dWp=CxX2 zUaX-9!(WTg@@1Vk#38#wR+3*|Tg?#WoS(U_U1N;G@Nl~pQ*G>@+h!w@KZxMYW{G~V zzaQNPjGTW6w}>F9LYN1Nz!j#A+MN68S{#NqK>imdh9DyC86LKRT1ZzAE@#sb3G3<2 zn>NP@T&7a&+XkO8!NBnUAdLUqy>s_8r55vJhCilL8aab*33Jom?wm(t?LGq{%q%7{)t6%-^%E=c$=_)q=PU*WQeRjGb{psas3xz9jI~Jq(6+a$Os&Xs+l{PjKy-< zd)Z>iXxt@oD~w~v2=GGPxKq`#v}Ca^FIz3;vPJtQTdh^=7r*8yo*qdJo6Wl|6 zlt0||uQ0B%V6~~%(HAaVIptUNs)^n4ow|JGm6?!Q+j+F`aI?y`Xf(`RW0;N1!gn(h zXGyiv(CiN$t!!p}=Pz8uidf!Wc&LrnYs`C$D3?}m-T3z798@Hp{(z}gS-*Yz?s{4F zOuhKh%jW{JHqPYF4TBQuoce~MMNTMJ?ogfJ!^K4>>7LXE)SksxTtOh|d zQh>lY-}G`s(OI;ry`gmWoy>NRqeN$rBFw~?({z_X!L$fzc&%of%r zR`FUDjiBV>JD|7g@p9PvbU&U!=IJ;b9g}i=9rt(Qx$wx-z2p0*dOb{3Vew%5$JsqW z#`k;d90wJKYHBc*gwqa{9H?gV5EEB`F_mEwtkU#Z4EVyHCNo@|@SU4CPuS^@v^Gb)h+R8>(0nT>vqHR_PY`%yj#6b>%x9CnYi}Xy0U1(1ePgo(DSWZ*;CYp?7vvZ~zVWmVF z_dwE`s4;T+^2v9hXWZP}ZREZET38kyKU{D~dnwJ7DV4^?22JP8JGiZ%I(shRzUtCW z)J5i{58nNNc?;B@#UYz&4gHntuUxz+idq*Ex%+L0!?VA=Gw3TC8mWb$-8kh4RnnR% z7Tfg%Lr)qbb!Mj{VFRB0FyTHv;Smx2VmX`s*FWjN(f9VB{MVUtnw6eCdw6*69DVR0 z5P+q&)kvxr?iJj`UATKegU~su?EBGwv5j(Ai^W8u2`O~B%w|Kgn#RxFeq1mLkMEuxR~jcU!2=$L&1x|VGA(2V zCIWh97bc95>6%O%dz@<9da4bKpPo8>dVGBB)Oq-0S4(xlWRZA*RC4f4Je6LxYj#@K 
zL4Rt3ZD71XL`4Z(IgzX852Fq%SB+At4RDo0D!O|6!|y)W+)TjiC@;AO&R)23=9J6I zOMO%JXWBc6N}3bzzwg=E@!X8ZZ)zO3GO6**EKidq(h})QaQ*c!5 zH#R-yvu)cRJrGUO17|{Z1$N`a&E``x!}<|7j!1}t1s-nPRZLo*S%yUD(zvE9T)(a; z3*@DjG=2}{B0?|R)joczAF>o7ZR{=df+;6UWLzx2J^em;UkvS$3*>HhKI1l9p)fuZ zwK0cUi3GL)OLNKx1_;;(?--k!eET+~7cY*E%{@P#gt>1=-4O#(GESC6<@&-)O?c8;z?pz>YOuDe?0oiT;a~br5wV@XosWlc* z?eg?=`8v@A$9Jz>{E&fK4>V`qn(@wjwWTgo0jZb6x(;h%{0gsrUESHEE4M6^~;jmTm|)s_(p0 z)uid#O|N%r>m-d$Aq_KPw+|3HzTBKHvjP^nwY9lf@$LmS6ma9Em&ljCbTVI;V}%}q zE0c^HhQ0harAfuwYsys^bWwm?cHe(h8UMb)I*l`Ge-i6Snh zZ*HNeC*LqFn1bA91u1e@oRdmglk~69eg7*K+|mDQ@~v&RcGBC_Qzn{cl61|)t;Aw0 z+(a-q0gBC}2tv~>zsWlRL9ZA4CGMohsByo4oIumNJZF0HWMH5?F!1Dwp(#u~$L585 z&gAt*qm5|P>owZ)cVFjZJ|~X}Es7)Ot*iHlxN1E&V!bbk4opzo&MjDmriaAo+`_tb zsF~*n$n!(SyGVStM1aVnrEJ}1tyZ#}V3i7mvc+61=aqUnZ!nQo!i$Re765$qy8Cs|sznVo@yRe9>H1l}1jNZS_)4wVd8il}bL#n^+-;Y~%Ae3CWlWEz9LRD2=KV zkg3$jRzxc(R-V{2e@*8J;1m!8m_=g9R#lLy1}{tDYi5%Q>MJsrSiHpq08qmazzjmV z%S&}$0=HKyl_*!w*CmOsS4#zhl42bYB@x#1HA1CIg~^g@+BFqP*90P{%+H%>YH+m% zry@mcc7=M?tWtxR>mtRwirFI64H+5bi&c)6i-j5|OPpLa!aYUgP~#cr*UFX{f>ES__dceMs1Kv;k2PdRm%u`3xCj_%;{G=3UPbUR>a3TeEBtJ`lDMX477rK-i`b)>UZBHA43SZU5`S9o5BKuPC$#ctOuKv!5)p41C@n@yRs7V6mA z$<0_V6xvj1vUOsgMP<$kJBPTbkZ2IJ4_^naK-KqjTd`DcH0q_I%}QufJKuiNT7xCF z+1#|=k!5PFa~7wCQ)N_MmesBk`DX=Dv6-Z>In?XGwBs1kB#foM$Y}v6jJ-e>`FsrC zisnJUUPOY?asU7$YGCt`FO&%<2&7TdL4d4sLkrZZwGy7J*Cm$=sBj-r@H!kavm1M! 
z_mh1$^M0bnPFVa~v7jYSt{F%QNPWVgCM_-H^MH7^-?-E{ zjf+$5H9*igMsqovRnMf@zOmNO{8q_GW`IURM_Ft}gA}U<0j;!ZLOr@C@L@+8KbHAQ z$rWVhd^;sx^Y3T!4ktV7LJ_JJi6_vNRr0a@{gd`XRv&`jx|K-6sYNQA&w&lDaGKX8 zp?$duF)6iT3O^kjs8+0CUZ%Fk#@>$h_Ie?GVjE0>YF@no9-5A)JQi~ zXlg z#=^oz-i&COni{m=E5jaP%twT#>)tR(UBtw&VJ&3T++VO$bRgG08;XGfwf`R&XuC!L z004La49P=a9#9Yj;F3JM z6;K#LUsp*GWl-NXLKEA}k7$7&wiia&F_>m&V7Xn1wRSyr*j>11AK-<3g?IJ?3hgia z107{;c~-VnS}Za&6FA9E=Qnow|#k}$Dp3+ zndet}1?i36gZiqkHd2u`N>ToeQLIf;lFd*Cf&m5y2FeEh*Gv{idjmlbZLyh|nXf(@ zLU43nI1b}yHZzH(_8Y^hdTNK>Qt1{im>}sGx`rMoRhk{oPD|O@?6L}_R9?xhOUyEQ z{%6YUCjE!$SG+j(5|%BzRE(#5S_BOz@q`$Xzeg=9ysD$#)y;@93Pc7kc6HCobmsVj zTW{0dlRw~D6|6G2{uME1bb2OwAP8|D52~;`Itn58PdBKBdc>{7OvEetN9q#1eKxa` z{zwf~u#Qs6X<`L;Ds618BYNo0CYtIXnMS3~6F=uZXcB&?@DCMyu}TB!HqpaWd`Gnh z)QWr5ekHJHTZuRQUT6FTzm9YIC$YgFbt?WSo3*px#@V6|Rh&3MnR2)-^dYi*r5=0F zqxR_-XW8!&?n$h@qub1nlM%|?(>GC*DM8#gO8o*2P>%Xn><@aU!<_mEUJW<6G@*ZE} zeszlc9oIUAF5@3%orF913jaB=g5HGe>)#f!N9A|{Op^t0Tt^ayzki;!Cq1op*H0@5 znNeImGt11(%uXT*Gcz+YGc$8yI%ej}F*ECCTJo#xRQGhhrmt#x5fIbKt%}U5S*&C`i`mKh zY~n-q`uhERk$3qr-)0}*<>!2fUrKyWk(Tf`eNR8r4E@`mMQ)@!PK(_M?gU-s9(GUY zYWI|TS~t4q+)KLIz2&~4JKVS2clEOSzWb$KcYlqX_C&p-{`zV(F#5DU#(jcO#wcTy zG0GTaj507J%F3+9gM6DFziG#0zg0_NWfjqN!SXNLpobm3=>|ZQWZjnJQ>HPlJf7qE*YaN~^U-Yqee*v{75MRok>(yR=(J zt4;0d(CIouXX-4St#fp~F4kqbTvzByU90PLgKpGKx>dL7cHN=7bhqx&{dzzT>LER> z$Muw+(X)C>@9I6huMhN*_Up6yvc96P>TCMCzCmm5cu)b9vD+m6M|rMnP`m0&NPl<&)K^Q|+7Yd$33D%G{lL z8T2IBy$5o8a^EfgRqngtb~7M|z7F~!=vPp6qo4C+?&bU}2vX5ru`S!_?JQ)^_A(Om zFBgYAcc}MgVC=5Wjr6^&KGYFuR&;gz&5B*Ya(m*>+qWU%e}h@k)x;HZfI;@gqb*`q z`r36CIXvBl`tDs#{RZ>v-JZ%nVHRXBHLD@b8E~%oY0rV?x41nO-CMrceVbzOQnM1` z;xM4aa=QImV1)UN?%QP}iet@6C|3Rt`{r}z0b?y^NvNs(DbQ;E*mUl+ZVroo2uwGB zpi6ScR=()1A-J+{Tkhm;A& zWxj)!K;OVOjMK<6$d29{Dj}>bNo)~=o|bl^O;N!gnpqvSQddt5Mc*XU&ng5HMppf6=t590n(@~=A1c_;D+sC z2boWHkkm0RlGlk;_ac8}IE&{=1?Q8(G&_e&*g4^r1I$ITb{LT+qP|co^6}gw(a|_ZQHiGYwGkWzgpDS^{;j(-EnuY@E5_L zvRkd!G2BlSv;?NcIQHM2(}lZ(@(ke_K0Z@;o{!HG9u)pENJ+_T;ep`+OL<_9Wtdx~ 
zGEa%BMV#C_i$N-Ps`V;ef6VWIg%Y_p`~`K(3eNK_w@YpYKuerg&qo#|k*|wHxp}~1 z$NbXPack-^8yRXNcjbl<@;9HeOmZfH@^ax0Hs`|B$R>1hvOb+Yo7PmfwkFZS!2t&0Js#T;{QuP)pl zlv^ch8r-5;%_S?HlzLT#upc|~687==+IynEaO_T86AOFgTD=)Q7Iup6P_Je5H|w1i zh zGHi-f6}%*>URC$G)W0CPWt=r>EeoohM!6tGpeGN>IK$X@8zxB?g)^<&1w@+v3G1D^J(s^GOP2=?S)|(zY zMj`9!t**VYWm3<{z=0SSalK0a4rr_U&*o&FaGuZUBstrFzKKS1mH_>P7XbxyuEUm@ zF|JHB1As%KX=VHOtIQ(xevsKGd*U(3Z1LU@H!d69lUbnNrc8(A1z-+ItsUIFX9A$( zai?-;!Vp}jd#g5e(^oqWRI@)u>m8E*Oub&|+pSk&y$R`;)Ekz*I9VUfEW}`>Ejd}i z25=q(%Sg^hZ9CR!KqqOTfp4+1o(k8OZqDs&bHpMciM=@;dXoadFd67X%|dOrRgU8$dH$@ddx7})xbe)rVIFo8K3Ojsl!%V35B%UMks-?tWV9v6_~ zNuH&KF{X?<_I>g#8k+uQFpb6){fuuJ1Y4Df20F{w$_P% za2lQE71*CUc#u)1+~k>JTA6;#w__N>Rx`{DXPX&m#<0VTH{;o3CYvej#mG19em*H> zCR4&1o?yjNrrAk+PD$%#)|9Ye=1>XyMM?WdNjtlw&5_!DeNIOh^zb`;Y>eglp2rDi zoQL(yPkiKuvE!#b|H!iZ5}+$S*)sfC@>_e=c*(k$hN_w%s)?fN;#HGG^@-=7NId2F zr^3}d|IG67yJ-lsWH;3(Ag!nG`_{_j+?C6@%gVW{A?L1+oV&Vu;zFKrp8~-c;Eyph zVuV@``*()575qhQ2j4@@(&=iK>!(#D{r-iFsG(!?0r2x=UWH!(et8r>0Q^ey{}a9u z_>J(qV2#e(Z!N>`r1V#!`Umi9;lBv~0{Fe~pM?(rf3RFm9z%qYnW~SWDKiK#VZoj} zFwP?d)YiWZfwmaa0lA<1S#K(}FZ0~YvLTh+0e_5fW|S(FiyWmB8C7)BF%-n08L_iyaI@PX0k^0EkiBYn-Ps|&Jg|H$1)7iem$o8 z2BPmRrGb>XS{n+dysD9?y2gA1y=Y^8004LajM4*a1qmF);hFzF)#jmWjHd#D@07ChilML(X8CnsMvy+?6BNi) zCucXqQPb0Ni#TEZrO9cWHoMUVlQ?H~VR{yq{AaKFLvL_<+rrY!Jnq?aqxtpm$flc? 
zmE$S30cdr=0gZk)A5g#(Hh#*~6Rao$~JHy&!Nw;JUzLf%if@AtfO_p`Os>(6Z10 zIKNy=+Yi&Y4-ernJcZ}*5?;ewcn=@p3w(ngX!J3ZcQBH%Ok^sTX9javz!Fxlh7D|C z4~ICxRk=3T=PZ}F6?fon+>871ARfkJcmhx189a{{@iJb;8+eQEb`KxmBYc9-@CClY zH~0=e;1~SP%mNl^@s?_7mSaU$W>r>aP1a^z)@MUDW-HpNwx+FXGq$14+M;b{TiJHD zlkH}EfgA^MupA?ixn0Wchh!?g~QBjiYFklkeuIZF1Fy<~6MMLd|2Pn$IdYEMPU;U@T;fTEtqln00Ci>(x>=fNYlz>69)Q z9%i>zkMv3(3{SCNt5KSy8OBVuXthd~OvnI;A3=I$P=;h!Mr2gR;F#ZH_$~B3TdW#l zacZc=t6`R)hFhWCsD@cV@f|!QEk9aJH<&ljX&AuVGtu&6{}%&tbui~K4!5c zw#TkG5GUY7oP?8c3QomoI2~u;Oq_*_a5b*M9qvE;r?$!g# znBzWTHiZ&*E^X+}YPNeuC;GcHy&24CCfi?RTIt>WJFr>=)<}W1$^siO3ic0SgJ?@v zS+XqbvQV4cyKU*+Ce5$b>fMv5ZZsLj=n3ZD9j418gejp>6$V}$5R6{95T}2He3moBCbQf{vdG&1MQbb4S>ry%X6Gmy*9#3M(H{tRb4(<8$#o#W9z)m`>}OC;VWH38!gb5psOjQ_w_{8PB&ACoQt|AswnD;^nY_@ z%IT`Wa$QFj9yg@E+?1-lCFOi;V7YFOYPaZ)z%t$C_^Ipf#?k5WsO4JZQErTm+!ph? zGbR;%VK5^Z&s05>eD4jP`;Z>h{o(UK_&ive?!!ox7+qsuF3=*a&`S5&GiF)zOg;_$ zu5anGRy)o!alDtup_TmLkXKOiANjP9@5=!>x#;PdtGJqLxR&dukMku#L9KHrp24YTInP zR%?ycYMs_=gEnfDHfN)<(b>$naFa^+ZDL%tt+@;K(EnVkAM>|q_d66f$1hH+s)k~i zRbX_-=m;S-Cwb&AO15&HSjbnQS&-Ajb+H|`)BJ}~h&^~OE&l>0;q(`H0Zodv6#_v3 zME~sKZaErW0hBHOz6o*a=wfh8txO1xk3- zY0zT8h7&#lkeI+XTdpn#jM^nasUV(f%*)S z000000RR91000313BUlr0M%91RqCtis{jB101V9x%^8{*nkHr@W-~K0Ge7`90002Q CLkb=M literal 0 HcmV?d00001 diff --git a/search/grounded-generation-playground/src/app/fonts/GeistVF.woff b/search/grounded-generation-playground/src/app/fonts/GeistVF.woff new file mode 100644 index 0000000000000000000000000000000000000000..1b62daacff96dad6584e71cd962051b82957c313 GIT binary patch literal 66268 zcmZsCWl$YW*X1l87)X>$?@vE);t4{YH1mFe0jBE_;zih3)d=3HtKOj};a$8LQ z;{mKizBoEx@QFoo%Q3U|F#Q_99{@n6699-amrKppH2XhZHUQxC)koh9Z`96Da}z^j z06>M|%Z~L6Y&1qSu;yQl0D#8RSN+!)NZ{U~8_aE--M@I|0KoT10055byf;V0+Ro^U zCui_=E#qI~`=w~)LS|#={?)gfz?a>x{{Y1Z*tIpZF#!PdSpa}6(AxtIw;VAx60fHIlil?>9x#H)4lkwAf#?OoR zq}|UH1-_GP?ro-XFe6E6ogAsB_lMb{eMTseU$Q#8C1b*`2YJE2UbHtB7q=F#8c?(} z7MH~UQP;KATrXR0jxH^-9xhh?btgLZV8`yP{4?~5t>#`dU`oKckttiKqS}=0h)-TL 
zm0*m)Fqi`0;=bZIlJL!*^OrHroA}Fuoxd5CU8V%At$}@aT%_Z<7=JytQ)D?oC4fu; zC9haKy!Hbi0eF1ipxzXiPt=aQ5wop-RG^?s>L>gO@@+lUXG(XGZgCD!0D&Zs4~^e% z(4?{(WBL;9gTH%!vIjaaOL4-?5F%AuAhqP$}Z5*a}4%FHO z__`OOSOe6f$5}vgbHKxcU-p9ue+OOu{ZSHabi?^-WyLLrt+h>i_s0J8MO%1(?6KJ{ z63srC7MKwg5YmV8R^udkjP>c;o0jS%3s1#VZSd_ZMMe}<_%<&|(8tdaVsob9SlD{! zxA!4>pO-DKVwcU1_Qs8{!D!x(rP>~w#&w_8M_z*m4KGu9`d7DfIq*xDA@Pot6Re`h`d%{lBo3am-vR=-J-SO9A>&egV84q&m&9c$A=5 z%sfs3V4GByk@8gn49E{h<(XwIcWcps58AEdX7(zpG>h`7(%)_eh+vz{k!pm%BiGC` z_=5Uzd3aO%4=d~2*uWjw8`-E&TB2z!BU(IgE;XDXw1NdI?B6(MBrV0BsbKgOQ)gVq zTiiW$Yclle$O3+`9mkU9lI}kdXSxZCVc3#pUpLeJh8n71U(M+H_oIWzXjf>?Ub;nl zgr}Vj|2|%YuvXf+F+N$AD`H8>BgpF)5=3ZV&6AF!QO#3~-9`j5fsyJ#B#%vv4OtoE zoN*Lf4;gCHrm9!=;fkWSwnDPm>OzFyN{<}u3vWw{2o9!32OW3*>roJVbmjZQzlG(e zE4}U2iH!Q@$Q{J!?*)q_&o{ma{Zw*#>>xizG(K?ovKtF`xdX~MyHu+y&V2B#8?UA} z3)GS+=ALKVHi<)w-QE08#-CNleh`G&y`sLDidTfmrv{gWy`!r=i}Q2v#-<1h==FuW zo4*3ygV;zyKBgxN{?HQ@hj_U+#I$gm{DHH5VFhB{&2 z43OeSH?8bW8=avoZjrZrTVFiF@fH_w@Xx3vrm3WK)B*ir9HxIFotJ&j?Ql0|_MlDW zFAFtz22CtP@SyIE`u?GZ)=dVaum({0Bk5$QOjPFeR;d)dg^tAMWb#XR zx1N+SC{!SJ|LgCF#-Y>9V0n)&ec+ON<`=rB^tflD@PO&5dd1P!f>fx9N5?Gz0tYaF*sLZO0G1fGI zJBmO(<#@h+D1mjw+HK82Tc@$VtNxi% zE|8*n7FS*<*b%&+mElheV^vn-j|^j#B3O7EpDyIt*oZgUdgrVD+nieQ%oCn z=tvim?Kk=%r6-5a5KYn{cSN(c#);ls)$rs z$>2WG89OeQn+$u%7X^jeuG!?UPZfU>)k2TT`WR;^in+~$27hvw5jonPA>KXZH+n=U z-HdTmV=8Uz@-l4RwROKIHX;)pYhnQ{-gA8{I9_E$1U2#W?a|Z=G1jId8eMbFB2X74 z`tO++;x+F#xG;{RF=LA2>8C&>LFr85=i$Wb6{aFrO{Wxnxot^AOP6_d{#zLQ$rDOh zmx8VSzye=SUQ$IMq75xI4HXEA59Fnh)i7cO!uVPQIAC%WY#)85)HZ%qC7?%_55Ys0-MmZ(mFLWpk4!|Q@tKYGc|M5aQKvdmMnP?P5ZYRPA@UcNk!m! 
zYM=N4>}|X9#ViD-@-{OA)mQFn9XsaS7Y9(?%-TyN$#35%!F`M`?q#}XOl%HVhbwjt zCD9hq%W@?Vb7iv9#SQ!^zs1Ahj*)z0u^gwJ$gQZK>LPl(dju$D&tWsLLmc6KaS3pr1Z2W;DVO|v_@95?1- zMM>VRwrEw^(?(cgn2z03cSM3w9re}A9@&J-iar~ThaWK;6qbgl9R+_nN+$C===>ifAHw@+mVJro54y_ie`FBKhGpGJfp{7P=$nYHDU85j@aE6xcjU`6`n+UdYu z;k~!=E%i><*SAqRV{@mB5+D#ad!{z`YfsejCwwfQ^S{HX?u$eA4ev+DnZ3iM@r`m+ zLRU?0^iI5+CYyk-JQeAW21GoJm#CuR4}=^0OawIPmLf^Bj+NP;px>mQ@ju91?hU?A z@^6NFDk5sm}DxK#dVoV-L%Npvrr+ooO@;l>4Y7QQ- zdW3cE{K)ywgL|nTIL7??f&XRGbC`}V$#eCsHr>w^yd7NU`;^EDQzm7ei3K5D%lm`+ z_NbNiy=Tm2b-)>1W5&6%wKhpFs?&aw_c-nSe6$OHn}oFM`AT6SSBsV1dD$@{#%ECO zaiNNq2pee!IeZP@I^E+v@_!MPqwA4mCt$2(@-z0LcW4k^>Eo>KuM~B@sNL97E6TFl z1)4A2mU)d_2f0GJOww_Oc7q4(mz@Oz)qi8`E+3Ka*{~&X^P|?>khUM&hA! za-0+zz-fA;NCpK8V8&lEAj~kov2%5g?yoc=(AvRjAGX}w(W#TavcyO)!zy( zBwy-z_~z`5c)^_D?7n6Bk6s#PY%1IH^>8*9DYTP!!0{`s;pmNC!t)DD8_4WWoHDid z?f}^jLEV%i`>#l)r6O{$EICF?lGtwyEIZdkw3-n3GcpRG_G3g24WI%{ z$9%gN{?t7?aUhEagsS=Crvcft)p%O>j4XBnA15^iRW@>yZTAu@VcFtzH z7Pjzcy@{m*?pI;}+Li)cVqSjK+o9$8<#htd>v|Z!spzHUXXhL2&VAWwmO>TOz#2F* zLKBCt%h1UO`bcZm61+W2uiv-$*AWdy4%*JD#Q%mVN~LX?P?L)W5)_vf~Eysd%ifN06o<4DrIb zo`rgBZ)aY-Er1H(R(loTgeRKc`aiNY*ov~%7tdG23sIk0S|&| zI`ym(F~+g~Z@5Ak*#hsXsk%wMma1o}98R11$`-WqDhE~YQA+mXDy(Q>%<^37G)?hj z+kV3owb?Lm^=xvbUF5qgnn3}%i9dP8l?^m`M069e_$gUu1G~Si$r#Db>RW?Xxr1i3 zU}3e66CnC_N(ryScVhF%p7!Zs;o9%K&6EYZ3oRWH+nY=r>ML5RV}UVM5LU3?&R^3c z*yGY}>NGt9GBX1LpI6=voIS=^Xvm|6n<>r?b&=nFv_-Z%Mm7gp! 
zSI@=w{S$c{z45YBG@x~lPoG6l=DOXaZPZVlw2+33otl)CnYysT!Y~2K-zCtw?30-Z z+j4f4G}f{>C*}kX%RUJeNc7CBpe@lm@?8X1D0HyuJA7fg9{pXg(i_i5pHz&enAz99 zWY3;MKvcgk8C$XtDv6Yv9nuV?irv9MVk&VuUm#O*IQgealiPX?FMl0-hGD?jlbT|; zME&f##=f<={Z30HDUKa?&A?`}^JL%n$By&#!^_LLX#Hw!dL^x^o6ADIYq{oZ_wI$f zBPDV!nu9vX(9U=M4q63-<+v6a=_auzKjbnp>~RgNBkd^lU158+SLy@%Fg|_0De54h z^rK{5>e-9~goCutBe7pS^s-`ZU@;qFoc`@|Uwyz__~mA3V5aaYCZ<4e6g-K3SmT;h z@it4I5vQD*>)Q*Fk+6`Eb4vzkclOo0&Bf~(wh1Wr-GBRg!}h;jXKPr10(}{2!1D1% zZnFF}mr~=Vjw0b47Mu_oQ`l$EqB>V3NVJyRF^Qh4r|cIXJIkCIu|e32zE3D{>g4&%2EEepV0ihrnN0lI*h$OJUUNEJ+f5_s5*kt zmQfjSrXy0*UszZofNBGqi063mn#*;wW}5WUXL;JVcPLTyPpbj}@IfE`+)C3>1iy6( zj@xZ`!%VYN^QX6s+4^nia$?ubBc1sgz=wkk0rC;u!2s(j`^WgqwSUq;DL&UAG&u(% ztx2nnfUn_>ZkfgUW8E9g}L@NcOjYNW~s;MKbcH~h0cpk{_HWNdfijblYz+h2z03P3!{w_^F+Z{6(m;mYyc?e=$R~S7W6r)rmnhc^ zWDY8UgC=qhHXPr6E&p}OFapx)Yqfq0c|%ScJfo!5%;`l<0^eYMGZSctYCudt4D;QS zllZXAwPzujN)eGld?PN9>@xFHYu!q3RYPgwD4^+{ZX+R4pqMO?|LJJ$&|pqT%}z(2 zws%$GBS~6_4OO$4U!NF5sidchXC;p!pWSoPq9I=D?mxL{Zt)>jI<~1LE1+Oz;S?N` zsjnlQu+gxjSKXW_*MzO^o#-wU70)7mu(uLfuB-0YqK5E?-e-<1nICGBYERzbSu?t- z1J9I?E{8Qu_&Px*?|>1;GK>itJ}M{~z2zc|c`DfS=_rwR>wbvoH*rc9Ca=CCq-4Jh z+IxAat$A_beud7*u*t20_~6e9o9BJn_Ho1ME|LyR2HWhz8j>^3+Tpo;1 z#OP$C#H+-wZB1(eXsCdjH8Y>Be8*l^l2z0+y_nU@-|33tBxzRwJX*%MM2dIi{#=IoY<7?7I@41JDTMl z|9r8UIP#bjPm~nR+<#Sib?~q)WS#taf5E>&WYVfkl0n+1X*26v+XO>&f<8pb)x%vS;$rMu{Rcy+BTIL?an0i7iczQl+`d} zYwfz$K@_rR)TcHqJ%uE`{3$4djVoPQ;Hn?ilq^IOYxj-eWN$8weIZ>f`k+fXTv4XV zxXVid5tejj=$k{SJ|9C8d_7#uwA^RYU!2J#ik0bpw9U$J7X!0I3Cu;srmBFnZmXU! 
zu!~xOmIrL+e;d4Fy_Yn8BTM_b>7-kEqBb{bS3=bJ-^ zArybG{xTk8B}Ff%l0yRj=@m6PP)-nCvyy%R%;|U!{>YrP!}BK`AZ-hu>ElmSHK=&> zEupkk&(|o!b>Z|PcSs`6=3@`isI1|I>wG~8HCk8BNXvslF zb2qb{NmN5#uR-97^5i7Y3#R5QJ74sp0$r%yKu?ed&+ivClsUAJZB~9o<~Q6;L}dp| zgxwnq#X_ME*@s7~+yMyT#C>E|gD=JjzeA}2|Gfez+Cs^Y@3HvO`zi4Y z2oH@RhUH`=t1aWXIifih7aEhgjrV*`ZHH6adZ_+ar&ZyfD2E$B z6i?p|;Ppl5a{2F&Nn$CdcSjfBzTQctXYmW#oGbBx!zpUKne^JrV-1O*A zte39UNS;l(F=?FNaY}cPnV{;IWxW<}kbX@ieFQx@krv%HfvG%4XlKg9O7V3+8>hFt zsZ_-g>;fy72bHS{qLMf>2diP8r87W*IH+%^i_F?^Vcf&!KcIFoE=h>1+K_QCN5_s_ z4q#&aN9h^Ld$%bf!>GnfOUhgzxE|*hE-EA?ojuK5A@-75Y%0`lR@w?JsH>*y%6tpk?I`Tui&N%cfoY1R<> ziTCSG=en`fKl@2rmFUkA)=$oTW&^T_;Wp@KWjYX;@4#NB@x@!36O)_Th#4Bu=8*MK zKC=NwyP~_@yce6Gz$)Y@)bwMU2i2q)9rf>$?y76AlgTZUdG4W6;#_}FOmo!8WcV9? z=tw8waqML#6=2IOVbtwANc83v@=3>m-{G0{Ny)8;7W=g^yEtkE^>yoYbICa)d+sE5R5 ziLK%3zGNws91-!M=Gf<__>gK>e=N=WaVosXzjacH1QSgiHH~f)O#=+XaX|Rsy<^PZ z+N0swA*aXW@XXfN_}RltlFet{@n-5?bzS1KAire&KbctG3g4A!B3yFxfvaUB0=oHU>7e+qgGXcrRVL zaJBKZ_7?3UZ~OFGJ@XP}4U>$LdyBF54(1j_{1m|hWwpUDgwKj})AR%%l7uYevu|w~ zkBOe1zQNCkzkSc_-nZ%ZL1wYmEb(6jIMU>7Yg+K%!3ogU`%s>|sEID}D>#`ArT1Xg zY3DbPR2EFVq|exiDiMyL{;h7zv1OiG^7pKqV>Nm=z2UX6`q@g1l92J6cc+a@kZm*I z1)8d3#;T!<7VjIabqo@eyQoJ)37|fr}Z$3c;pZLeiyn9}` zOV#On7kX{lo-U2XtHNsMgs1tS-$8(nM4yol$L~+TU_|hSo}B(aT+{L@Qqtw>&LoFVZ&5)JcX<|jF-?{%dp72IDUzD0V*CKhi2*j^8=68STUt&br&iVp zT&BuNStFLR+Z&i$V42R4;X^c+lSmq13oJAc!GbaOKI=Lp0;>JnzgjCjp67xP4qg9a zdR?9CTpwbT3D8_T3Xu@c7&a8<3RUEg#=nkbg0w+8cqc?u^a08zbMm@Aj|2z%eC+0^ zql|__mJH(p_&ZY9I9)`pcdL0P#sxFdeI2ZfGdQl2{heylGP}w_1jKaz3a+xS@%id) zUXNpAXIJ~d{kp)a&3uJ>KeBkF0>+^h%Q=^5J_{f0O-z>PK22*&cP1cXs-$D9ble+= z=~ByXN64k!9VyHHrr*1R(d9x1ns%vcOG)`V zQ)GPJ#*rwA?dc^MkkKtXkNRsa6q5~dJ6-YNo3j!4o!ms;ejpQ=^?m|rTJiRsg{K^5 zM7|8=3C>L;f(3o71q@ZNtzz4^=Fuj+G^&VWgU!g5T&)PxJb%5;=Q=oV5ZTVL+>-dx zhhj@57~9XMJMd%ThH!JwXU+%2)FLU@1Uk_VOT~m8v)Dkv{-tP3(1{W3lsxylL+)Ams{`mFkBBHjmQA(dV4hlVkETa_SZqb@%q znl$-FD&x1SE-}P^LFZj6804F6E=n>Fjh=Og^ix@pmsBrc;SD;KvAb}^#tTq|XnPVJ zpT2sEeG7j1wQD4@_IZCbtQ+%9$cJfH+nzm7ZuJ_=8dWlMMAS=kbX_atKBec%d{?j6 
zMT6`Wiljm1dZ+vZ>{ozBVSFPAiexw&_`jBDO04g7sG4t^{7&T_s(;7^OJkPNAk7EeNPJB+3 zvnI>9baeSf@IPpZWe^9Ev^W9*!{4{x=I31$Z|j8kg4qYeZnj)K>zaEC-uPo>RSdLE zc5^nm$Is!d8}Ln;f6P3~vKgXj)_-B2uSEdl}Se4P3<09 z^@w?vWg%xH_Jh8+7{G4dT9PLFNw#Cn%B3(2XpP%XOtP_Pkbs9kV z$Q-3kxGQq+N6qKq^axgH)t_hF!-n7lva+Iw5CB1Z-2D814juglNK5g0+ch`iw<~fn zBWiwk;dB}#ap%1RpZax*IFkCNe69y@xvGr^2Afgy<;hRjPZ&4)J9UVSLbPd*Li8;& zj#t5gx0#(>uO7y{KHFrUSnY5iQ0@N6dsnw_XV|c+=cU4sBcs8D_UkF3q_a)o2PEyF zbx!;+GWe_i*JgQHGt(zo)>&;KdH-r4|K=fgzy_@zMbL|azNlnsLrvmF=z&Dr_F>=o zOyF^3ZU?9&s$M>Umkl(GgqVraCNJfNUCn%G@b_nHt!Eto8>uzL_&DQ#UKq=` zEOCp8rf~adZdQ?Loa}6dzb~63LkY2ne7g0#S%1Qt>FW9*{J};0(eM>Uzxxx+Jc=Sw zNbr5M_&QPzoZD-!SVIZ2uWzT1bQFtWLBLeutjw; z$)QUUFgL}$slTMW_j9~~-^lx*3A=|OsaHGxyolndAN+|6ft0Ht44TqVo7R95)TnNp zQPr`<3|W_hYJ{+oFnY|oclbRNqpM?1ZI3)7DWPW?MC-KgzoKB4o$cuW)CsOirDD1w zYu)U^(;c3@$p6$5*I$McZuo=gLiFH--|M}MGVvfh^UWW1Xk z488s>afB{8n19#I#%Qg?lGX-cA!ZQ4>3`_FPJvUKpF0!VF%u(QnO~)ezL2D@n4T!J z^TLk=W9ioU>M>iMaW}C(=-VESzwQY4UB6i(J)vX3hlOv*D;9`p!YA;Jo09ZALCS0x z``9xT+*}tmjgwkb^Ht;=)Ha!3m$Ej3da-!tbc8;59KaUhVqo*5YWio)fbPmVPBcs1 z+E63@FJJHMU>@vmiQydDtYDEDw-;?c`FlUhl)EW~JP2Mw#)x;w4hND9y52uN1_s_U zbd_D{vg>WVjMxf{SyxjYYv!SG;qijw`Avz%TbMSMhM?mvIZsNd^g$c$N zjY3h7e`WP_q^S_Dy4f4fx-AJ5imltL_1J#=C9HNs((E^m&@8SiY?#ONNoMOI@>V{| zzt8Ato5|}rgG6+Vlv&z@Jl89_!mE$lDYbygNM$O9HcfPZ8)J&)hQ5)GD`$Pp07xQF zz?AEtd23`xy<1Ka)JF^Wrs@gF){X)*UPwPU%$$DHY3tQ6>{Qy( zI+f9}N*VO;dNX^!aO=whm+vK|KxofHRE+nIq|`WcH)SPb3^IW+jjZ=GtMEFhD9ZBe*g4qo_y3(B`47t?#J9n|fsREt^6+oZnYE|O>VMg+UqNs?XySy+NRDe)ZhJ21Dg9^xuAx;~ADlE4?&9K+FY zLY4OquJPQc%9&G=agFz$sVapHEv;W~Z~-$7(71afdx?2z$CZQEcPm+W`E#ptJe_EF zNs=>4HZsJh-4Qn(h6^Ly;cS>|l~Oy?Vb**xPSqlKMvd+md;Jbp5$L(AjPu#&qk;SC zAt$%M%wCWtQ^L+WOVlob&+GL-GaUCk#gJ^FLpSQBfr6E<#a#buo+bMG8I6`=zw;r!Zr#``Y6%cj7(T>{_-N(%43famwv!j2H*;aMnE} z3GVb9&|gq~f{@+%UQ0=%)KWoB_Ja5(-oZW5k!XrVeL$#1)yf?DPP>*7gtBIkO=2|+ zk~!gxywqm20328+c`k!6&&}#+`iC12b(fR~H@v`kgQjgjkhYliLxiiTJFyoT;X5wY zcxSuxt=;A-b_ohLABKbb?a(Jhv(SoLXjJ*6#VgC^Io-IMR~6zl(u$kjz>u4tzd>T> 
z`OWiT@O8#+O-b3Dj>Cs(NV8K4hT@nw0v)>J!1}~dmAfC&V&Zcm*7+tb&a0Z2n8`=t z%UU0!STkH%} z$Gl|&T*vRGX=^F|=5m3yDO-g-DW8gQsZGYyk=GWZYos0>I=7MG=mlij%mv9*cE`-i zOfyQu?`5;Xqoa6A?@IAVZTZ+GKMps-AN9#tA#vufqKlEtZ$svUYH7;UrL&7ymjs2h z|KJgsm=GK=mx9x=_IzQv$QXlsJgVYsJOU@iW2Aue47K{Mnr(% zls~)ux`ll{bGrQkeB|0MiR_WX)dU3Fd+OF-Ge_2T_8?>Be~_-;ZvT)7Zx!wtQpoYp#(5_i;Y-fOez&Vj(Be{*bW0QNL}yF}Evr-^v_z zz`DK8xp-uCA?9=`PCl{K9OF*$Cm#5y5;OM?SL#}a#eLWpBhNG~@!M4?Z$4jfC!=gm zwl??6gY&C;;dY!;dQ0gQq^Oe0;%f}`irfoFJIxYe)A6OkkC#f3**Mwr55;81L&Q#h z4uWd~D;nFML_bM6Oc{`GjE-N8*A4VR6tbVinQavNGX(AZ9ne1yAqUQbT+waTR?Mf- z(1^OPqjl>UaH%1+UOZPb@dmn)9aTIjh$&r~avj7?&MSZ7ScL*zE({Z&cFZKv6Rs=B*a|GANc994A_xCl+Q`(OY-EcW-Fv$LZe zgIZN8U4pg4tAIGcvk0PLjwhoB7aq8huIOyN z`E5b`yf>PB|DN`}Lu}QTO#It#`Hguqc>QFXWJDlzEvMW0boIu_)MOBy(+b7MyFJ?xJ&+m}|daP2c&rshQpR z)GHe(QM5MdovXb$_%7Y(vrNMUtr4Yjn!qiQA=ixG3GH;1o_+P|hR5akMmE-M*Ms|i z1zcxF_VRVeWruX?W?FoDYr)}h6sI*;r_srH#qEkqTOKig7dN0^n|V^>(b-Xe>rT4A zPq`G!qtB#EBi#=wtL+upix1#Ta)5CyiF1vB6@sz*`dEY%4RsHD^&B9-h4mg`dY8x7 z_qZ?9dG$;j%KN(2{QcDTEikCJ_Yp)=duVdShqLMXqUZcR+3_cbp=_-2mp(`Io)J~S zFAl*AZH*t-rHT3z-tb6K2+XM0&3jcV?|oi06Z^?-6K&(f?2Z{PdVr08yrcFtJ=|C( z=PdRx-g375e6xI@43*Vhqn4SE;3Yl~Psq70Wa5WZ^LtC`1H@ip$VdGCBQf)3_^>k4 zr8Me`cr1T*IO|7V`=tNF%G35Z>{6%pImj2~0Q;yab~CH1QLk2})BHu3Nua~R0DD-H z>A@MT%`-#?+5~~3RlX7mc6-3{YnmIpgXfG=rKza{J>QoaRBXcUsfJY*4uWc4>uX>f z;YN5AT$9%>?^qn-sI$j#<{O|-pa1DOuQJgXN#A`IctZ)`h%a1qXvX{lQzj*xYo&<$ zIb$i9ixGfSF3|K1a&;?++Es`CP>1Sx_`Wq^a^Se*?(=izf-dxS^D=3}sYHF&%Wb0k za~X?P_o-`s4p?eSoIb(zv`qwQMo`-^0!B>BB+T+wm3*IbheA#Hfnr))SZBHSAZ z4eS_C>y$B@v{{G>!U8*7kWc{peLy0kp=;NT3SR=uIp1x3KEH90sVP5~g!6&rn@eo8 z)nZ&OldlPLX+U5!^1U@L)6d%grvfNvT7d~YvxXx0yJV+JW z>V$;VyO-ZZvijEI@THu7SJuJ(+inZ3f0%=5tYhab7?M?1VO-R7eYBwUm2FEiVl{W` zZsI228CZIWoMRr6?Gcg7e9e7Bm3{3${S-VrdSRM!kyYZW<<7V>3@JJj6#^W}Q#Oyi zN%4)!(CAN#GA-bbNg-<&troPLENSK6__zm49n`e(>h+4tVQV~{ntLxMDPP2`Nz9UJ zH_j{E7~py=u6`1GlT;;)+-1FmlHe*=2^YZYYFIU}s3x(QEt;e_dp5GsE}GS;Yjfwh z7WJAw0GcYg)F&#+_2+-yZTA@Mp9OM>drJzdj~zNDCUWcYDbb~6$2~;H&5@&3F5uyu 
zlpzWm>RN&8xG0O4^Ei0%)0XknL?Gpx5$Fvbj zrjP@9?#yj#Xi7eUK;y80gEP;1%|p0ir#CX9vKy}2+TlYwuq!QV4cjgh&3SdJ;^KdA zrd5@meTVihq&d?MrBRe1Lvi)Yf8#DlpkWs*b>Dg(qi}a)aFM=VoUPy8)Vd+T${eM{ zn89PbY{>3iDWyJGZ~XnG9eM0MKSccm4XG;XWQ%qRs+l(S3R&(59I)|IoeUosjNqhM zul>F@wJs_|#T-%vEua08J4^~3u%sFcdd&PM?upyceQ%p7e}XY*D5+1vJLo>+gy`M# zOXV{DQ0gX?5jtyb$ECyt!sTCR6s&`L{8?GvqU`*yxEA@yX5<-_Th;O~_UK4KL-(=U zgY*m8?FK(arYzh(_X*T2IqCB>qWd2pI>l;Cdf9nyNZ6I0^fkMVV=UN4-YDjfAN*9y zuGA&CPxFNRUGl;+pIsOao{pxAW5)x0aySe1>=7zh9G#0S{5Z@B+>?cFp0qknz^GCS z6Bl=f@_agDx+q83L8Vgy6^e|c04=289z#@%)S~3u$sGQ@#O=fR_;%re z{piCv?e+oLQf;nbp!Ya-t1~tpDHqL@F!dX6y%tVVF(E6JmelcdSdJpCHb}2;}aa zkk@zgTc?BFnc!0xqF%uxtrDf|_@ll}db$DzXKtS0nY$x)?oyw_<^k($+OZp!^JV3t zqH5tCLsBDTLEhi8`b=bhnJ60o|M94@fr80rc=m=vRMl{963-HZnm{mC(<||dNX8Lw^k|t^_-o{YXWA-TsoICH6tPD%?-ZfK2mpkDK zHKi;bEQ?_1qCcToxpUrTS(0QyRXrj`DSAkSu&^t51+cny?fdvNZgWPtp5Y=K{br>y z$ueJ`_-D~ANmmIx-c6(N{tjp;N!Vgxu`cM@hv^ve=8GF?zR zK=wg!M(GxY7zq#JgTlCd*rj^aIc%A`z4T~MeoS~-L$7tAqO@8?D`jRg6LZnH{+iH5 zsqdFfY~M#4AN`&5w;;*w=>1y3etqDPDNNQQ&;*UP9xbpL-8+bRstIN`Gjz0UZ(J#` zb5V!yFAQ$C^iF*Ib-~qE{BI>0DIP2a8KgkXn8~2JW=rs(roFg(d+xQ5{G~gRYcLP2 zvpxnoOKx#=3VU~tZyiKjK8;euXsnS*G_BjL2ozE;;ozoD*-Id}SCnyDq>g6J?ac@q zYtQz3*CPn8_C^exl^@oW>{DwX=u~i8@NFfLedDg<$f-MYd#yOQ$?3lZ7x=P}MZ_iG zlJ7>8Xab@bK@qRtYOg5(K;I+!z-N9NsOl+j{(mxiPTW1=EDeEB&S*32c{p8cAq2 zL-QEor6gyn{fpi$?UZdOh8;}^EcDPo46s&;TWsLb**!d-^UK>_-1y-}Jcu(7B{I8x za%>O##Iwe=R|0O=hR*i_5)Ix4L6vT%0M7~P=zec>+bfO`jH5M3@8f!a{m`j4dquPR zH_iLI2iDDHSElfWyDqG48tP>a=%I z?|0#@f`xRF@)L76(_pQ%Z>Qxv6_p$PDKAYWr_i7m@tEFPv_LU_!9@=I=3%z%KRi(a zvdOJ~bDuJ>*^y(lGt6XAHu=?Xk)O;_{6Y>hK9su*UW{^45yDx#At2tg!huQ5gq!;z z=bqLpDqHH1c5Z~|skW)Z2r0{M99}}a3r3G4=*rc`o1JiVEy*8&!Ih^?7cr;?Jipx4 z{0FUX?VG?B)}wPC&QD1c#++01q;9HUv?#Tm-7)jMX=Wt!dmbh zpWusIE@O`jmu8<(HkOy4|CEQLZIkXWYm;jei4t+)W!kBf@ML|H#M>~a`_~=ee(Nt7 z5Lhu5(x`IZgL}P!kOziuX$zKO#1s-a1Cbh;&9=*)O|~Ff4w8+~ZmwOZ^Dz1y@ATWP zV$dx^85>bx^Tde_2v(gX@_Mn3cl{)0J=G5XYOBxqw>_xj1%gLdZBTu_JvfW+f%)lQ zT6o_EhwP?1r+_(RoXlrqNHAfIAkVipcMEJPD13cfBt*f=UozVzQ9$;r(#tyc5g&fB 
zR6ilW?pNAe=MIEn_5bBVvx}U`Bzego8U0XWPM`I+oCWeI9UB}|Nrep<_p#0X>{z5% zD8~JGTyqiSu5rgWKXX!=-}6uS-5Z-b|AZK}v-F%&S(6 zEPe;|5fF5G|7eKpC2P5Hu@ zxXbm|NgqQx`l7Vy%KtK|P9APXPkOJ%QcpOaCG4i4Xeuyhb$w?AR-fN-UTc)L+T(FQ9VOHyPqPrC? z)grB4n=O;n**2AA=1=Yq=_l0n9+A}L**0X4Vs)YqRQZM)FQPynYW>(j->PDH{cQA7 z;z+-c0;7&W{q09lboEzA?YUd#mE41DMVt~D8t3GsmyBw{%2Er%A${%Hx`|B`HB}X_ zb4WWqF+IsX-IZd>y^L-)bxC!Neb{|%Sk{5uGyj{FKk1Y63yBbEX9|}MiAnBb500$5 zx7VE7F)#S1oo?g71etXDHPL#-%0NfmLs!}NCqH}lU+8C*GAJsH^lDL>Wtj!_RD`?< zaHfiI*blCmi>&wQD4JTq$*Z2GuQTg{;sK5M-B^^eh|UR8=khTgXo>kx50V8|r;inV z!)B0AhurOYjrd+-SGDpEThfjoK7#SYCsMWY= z>P7YkL5+9PBB1LBe=C7)A={TPH?y=;=u%4D>q4$|kgI_0(cn)AM?EKQC1+_ zKtX`)Z&cci!uc8Au;pf$*HS*@=7AL4=I*WYUQyXMoirTQcf1}d?K&q&=6^RNvgi~4 z9t^(us$1rfxe|!T=JH|w3pv*Jp|}^Re$@y;eC*>{b4_#10U`K_`~zK|CXzznaLMSQ zM88*atx|VQ(@>+G8n~djt&3|BZ!4f%4m(OHQjz<96m0ixKXfpY-=2VC!R5^CnxF*( zwKtBn{gb*N-NpN|qeQR=g8@KpQXDmac0nBla4)}2?r)G1c2LXIoX%&_!h&k6Zlxe7%cZ#Cp>b_Z#CMUt7GEg2T2-l1VO(=3oEh!?bzm z&>D)f3*B74eq%kzJ2tBGupu3k;ayq}f_rR?wA!Uivbkqe^h;{{pyZTmMSYNUz2Mam zlPq15NX;Kirpnns63I#}cUF-qq?ssZ6s^~quu%x3Ygls-sb{0Yz-X6y!kiPgQxj;a?=n<*Vp3XayHTD@# z4+Kx|fC>H$%O_?rHA%z&Yz09}1$an>(m!E8bJm-s_=QF?#~{aET=lUZEd(p8bHhpj zbu({YXPZHzKrr?rBoC4T4@#lLdWUL;K;Ark!9`|;78CR+3c{Aad~tXIOpgeA&ZUi+ zmR2VTFF0z@#$LX1+tqA2=K&wrCwY7rOs`~@J&hC>7;KjywBz(^PV7X=KY0fLj!^;d zNU((50g-@?a%j-(qJH@$o6S?V#vV$Rt~eGx3rs4iQ#%^CdhWq<*{n)R76NFhMkzy2 zgK@sU(m#7#K)|0Wm<;q)zB8p{0s5w&D_Wo)z@`@%cpZh~--IGAE`9K=mSUS+>^$Xu zeqW8$3>z9&6tWFNnqJ{Fn?-b}uvg_^%?#7R$a4K>2Gf1aBgbo%X^QLwIP$>pKBkCB zLO%UxlLbl3sjL+HZNntR;+Q;`GOG0Z>jg zmlY&Wc7YiVVHw`nZ>%*#%7Fo)p?~SI=nfO28*T;G_pQZ!sD4_62;v~;%j#8D z*q=JSpA|d$&6QQqBQe9VjC3 zh9o2m;i>M00DtxAVHEMw4=N1Ew(RWiY8FZsEiB`*$`=+<)dQB(=hiOOK44XwAuHy6 zamDmm^V<^NVe~SilUnwr*1p}T=C(|B@1tT~SQ3}{otzI=k~-!pS9H;5pCu~&`THa+ zXa0_`E<-ZbP}YXe~ecQe!#dJ*3NoDRAb<jpsxKx1@jJVeo=*MjpnVj( zEE$NdEEJSe@?tM9E^x};X)+Cdi)Cl_Gr!OJ`%D@q_N}2!8|BRZV}VzIPC8Y)kO!em z{P`^`La-O-bi^C`km6*B?ZZ!WFi%7gX|RYiV}ZrEO-+!B^(3vWxzlZorFZ+20AI16 
zsk3?L%H~0FvcJGb8APAmE^m4~a-zvw>U_+;8Ur`Vij3nQ8f~P81WH49EkQaLNWm1t zM7o0H)%p{oIs0dG`uoluD3^0?Iwf0T$HO77n?1>O`-8||n5atn!MnX@D_5(>O2uAz%5r!#A7&QQqQWT37#AdY44R=aACIL%i*Vn zD1kB+ac@8e(U6LP3w*FU27y+5TGSbT6Xg9MdctdOHFnfeh0^6c%2ARj7G}QA9~p!D zIC~01GSW-?fL3JqX^ZaW0#x-9tbHN>hA|#DYRNY)Wv`;MB7<9ZtgUO&xL38?#n?eZ zq9(T;=Yh;D+iyktMfRK~xWASX%nuWkI)~qU38o5S$uN14?kQm(Dnq;Q^F8fg*cg>TA4oJQ%ZRlia zmQib%rxv0jS0I2m9;|A*qlIusT~9EdAgoJq@~=lMuzq?k24_6H&Z7^>VHNKb(zxxh0=$Op<-76-3k7Eq5H35 zhiuHU{rGE*qK5bYJtPvH6!(UZpeL90y+hvpwUK~&!I+-uL&=tfRXk!4fy7<>mg0tM z5gF2*zxlCKh1W~S3>`rYk&WRC+a;pEAN9SXOy{ff`2gWH#@>(9XYxcmc_BIEiJg!E zP6c}dE~s#gXT3(@VPW28<@VkUawKroZ!OpS$FM`CI1r;~oRo$Ph;w5?P;}beNgZMjCx#g4!?? z!&LY_^-$vBc0N2cSQCj6NAI6f>7F|H2m*!)h5|37#U=ZoIu=U-3d-WF%34!MX#A=^ z%z5PI$)x4R;g^Y+YDSs6oPji3g+>0T4J#P_qWe_nY`>vwl9pHQlJRVc zPR1Iy(h^veY%P|fu4G=7Z5WjeSRsYh=RsxWXQwHi@)BLmi+_`^mUI( zU$+l*K4j(~_z?KfLxfLCT@_ytJ?ZMMYwP*yK_XV#d1PFJtFw6I1t>;5UZK!F%l^{B zoxcsbS~yjiQVGh|!N?pHqirr2u0JA1#vzF>YU>%X3OYaK9$z?qB)*g}h(%|(fe9YD z^$pD7c%k>HaPB?O#14wkq{Zp9zD+XCE6<@^w`@k1H=u5Dtc00Q~_-C_jie3UGaF zF7FBlP>@V|{o%B^XZAV+>uOr0)LlGr`=^`Ix6(8T`ycn%zK@%6cAl<1P3K*ujBRi8 z!N)~r8u-{Ah=u5rVTP>-G0~EN*`uRe8YKQ5eSA+7LpC-NM zR!QT<-p-KjZ(F@#BAk=EU80_U`f)b$R91 zh&lcuyf`*4ETc&Jpjx7JH<2{6}dyAD#bMhmt zPI(>Lz@=zngFxv1B>?~l6D4YRAPv{OE>!)`J2ZV~?_1<}%&vLDdbr%N0S-39S+h`~ zf(cRcP^+)rJ!-yW2ejKSi^F63JjdeYhH`?Z+b?c=;Xd+)FWpscIf$x9#ZzwLPxnvy z_CkH|4d36FMx5ObxicOgwbyScPr0L*n;yk+upRv37iF~9@2s15ywam9M@lgmuIfe! 
zs3Pk`TjHIXez0JR4AVjXc@(8l4M`^$FojP1_1G2fs5i0YmUVaf$sgd8zbAXYaBIJ4 zaPR>700;nj0HD7!AOJi7@L$BVUm!F9U;t2eK$t$@-h6HVfLYCogCVy$$YXoA5Y3@xh)+T_)!ZjoX`QTufJRt&hP{XVFZGdlq$*Rk~GED^ZXW-&Wi7HPzgu`!Dy4PQ3K<( zywFs-+cCOHb!UPhD7lO9((Y{*j!=gcgpO^J>OS7vRtGo$`9d2+9Y7 zHHKGd*OE#6pc}7nLfksM}n%-ekpXs9W2`}q5{ zEbEwW#6gl%E-O^p!L*8bGwJHe8J9zh-kzGZL391=oYs!L)pafLQvMO*Fcl5~V z8P%27S-LGoH!k&H^)dA|?d#{)$hY+~F5J~{>%X@JKrQY*M_fE_)pG$f?6K5069Y9Na~@+#nS z0P-$QE0Apf_%5b9FmC|9JasY(ps+%?<6pynNabOge{IbXu)<9LaVpT3DPEL9U^*=3?(8-QjidsBtc1Z6$#8Uo~1tuf;mQO z%is~(#lMW=AL2{?V^&xv=Sc<}$2v;M)TJqLRb(@dV3DdQd73}Am}nGQN9HMxb=G-# zr1r$_3ghMHEB;|n#2O4|ki^)E_8lfS%5?A_E;uWb<)9I%n4@(D(h+KzHG0J964jf9 ze~iP-T$|K1rE`k)822_FY67YVR2jiCk*SB%(5vKgHRNiFxrA~>_sa2^lDJ@Y0At6_ zrkZABE1uY5v}J3_tQ z3k2`W+69lAQDn;SpoXUE9k0czguLi|uSK+m(&}BVHRGn08((njr+{}S&5c6eFLo!{ z_IKL_eg*0Fx7!7O1^xE-L#Pu`Owj$;kDMWlry#A2&?Jn^AXJIyCWvGTnH3_{ucL5D zzVl-xtWy9vmu)W7NW_Vx6Y-4-0#ENeBoDx!wAO5+I`eAtbCnZg&l>bQ+t6kI<$TtO zH?c-Iag&77e3CQ?)tG~03O7lQ1!rbdYJrP|UV9o|QR$h?d$z9$g*qx)L#Q=3*C=g6 z=_S`pFZ3C3NmUi0<4JEoR%~S^pFEpipu1D z)$y|YMV-#VwdIa8CC9F{^FrIy*3q@dOHJDF#2)HHIJmBqU9sD`*M-@AG2c=TE(*jt zm{QO{-$;CL%s{NcjlFRz4>uMsOphpLfuaHiOWd+3dSTeyiTX&+!QS1byO%d>0?{8N zB@oaCH}>eW!#ZxUy0e%`^UCxa&#X-|k4!r_%w;oQ z(xIgY1P0$%akLD@E+c##$YY1f*wNGWH8&%@9QbmFDqb5!Be5>|&Z2kgepR|Vppm|@ zzP>&)Yp$Y&HsXxkLrOr#8z?XWw_+Mn;B2Je&&{XWp0c4X@L@d@eSk0^w-NMzrobJr zDh0UGS^^=oLT;wP#%fzf`go1iEbo780mSluHlfSw#md;xacA>VDUr_4jYU??O$GNU z^)Z1@Bv454(0gvCz|5HcHhoaZkCGFY1 zBL15WE8sgG9YuNgTVz&AlXQ&$II(fOm!2Y@tRSy=SLju8KjS`UK^)l`*NLo`tT8U% zU|D=1d9z;~n!*8&P5k8HnBb=2O*>FS5o#7C*@QZHb1Xy4BTr5M!liKVCvG=)arM=M z8U?^LX6X+BpA@<{yENYyo1IdlpJ-HpU4>n7RAkW)D(PuIug-iAL%F0`e)}P@ zF0wZj%WDcn6LE{eS8WHGoHR{ha49V_Bot#VlvD1LA{&u_l0-J!Q1QQN4_X1QXS#rr zg2+X9qy3Z)`|n|rtIoca2a%&xz(1V-JiIFc;tJdGwsYL94|b4K3eI^fjJ9XD*}nI+ z=EDv#tBFKY`)FH(xHhSlmhj3iZcjN~xq`?5`GE5<0N!e8{_K7V#(e z=I56iKKyZna&ofkn~JG-0Jc)UrJq*`6mV;IXx#^DHUv7@-V++5sMAstmb*iJda>x6 z(C@R>%bg@3ZO#uREUef2(gtUO6vur(Ou8S4uezfBpby(j=$gTa$6MA$e!!#QE9*|I 
z#&MsDa|pJ1U$n^}uj>$5h_I%mcmQaId6-j$6N69KAM!-Bh#v?OD&g*FT}Iqg+Az;r;Y+l zV48VoQ)MbOdayno99glE@g2}(W^E2NfqvknaGOAIXTFKq+NH z!Z7V_J?breAgSDl(|F|iVp$zj9@(5~C0b3rYN#PUsy33YgKLS5K^8B{MhH=`Wb%j> z7Gf|--&xy(c;HwXfr)Y*l00V|0KTIcl9chy_il%DC0WlCzm@n9 zcWe)LLL!maQh};T2yI3B@`dG&c&yxQ@vS)l?o5i}2ZF_lLpR1bFVTWou5F(4Z!AW= z?2>bnsezZ4QD~%dW%9E0E-T9CaW=Wkn7b^i-m%Kfx5(*3pV-DtBSS7X%wX)-0X!LF zw9O}}cZ$ASB&ZjmTIIH|&{h|oQs>9D^FE6k*loa-@^tWo3F5ewm&uGbg3nK%GaKn0 zbZ`bd-}1{t;fm8#QUPZRhIZQ@OaD82^48c*!Qi(G@x!&GkiMG?E~rHx7LXbRC(8K1 z;GS^%5w>%3AgucVn9PN)`Tu$>_f9Y5PYBcAPmbSswj@6yO7A2%KtcxS@PB&F0Lmb{ zw|Bg^Z*d5vueWy>_AllEMl=QoW_+(8Sji7uw4C3-tAW5YFAO*aiZ2tx%xg`5e7|=< zf=obw0jGGZMEDs-yrRB7AVA3){4dh5JD~9la4kLq0@&@;QH9Np_5F3+`v3KYHq5qYD-Y#wFh@AZ(B%ghdn7P!NxVO&ElwQJDr& z@A@T;j+)N3KB|P4IWA&@qbUx?2j{827+bW-S0;k)G4=^rfZ|a(60qMC07&LgXyy>R z7?7Rn5UA>qy&Mom>`~cnA?R*teHFCU3a?0>4L*{-f|499n>8BJeiK-})+cRM*Fe!o-Dq1WG4@-tk0yb(LOUO^sTAb~&`N$WG>&uuf99z;YaIO1;F6$h0 zxGN0{4J%HoPMc0+PD@(7Y{XfUspMLb))p(W@7Le;+G*kG^$LKRqFTa^2_lE+Ln5FG zH1d8L+|7!i=QHXnBx9$HuKC;OvU1^Z%=YoHZSfn;YE<0kIoKI9_DzW63 z!1EoK;v6^Q9Pi^CDSsq~s>e%yQB2MKZ)pI+rQesDqqFffFfoyRk-OgyI=HA|oCX^0 z-7rAT5NyMCaUnWFZTgQ58VHbzK;=N;LEQxGjqFA2Wos$Yfy!LbazE|MRbofLih7k4`WE3lp!O7+LU5KeMq#~fmqCeo6J6Q*)nzcOo2v?1pc0S z<_^m4mLcyJcBdiBxqj3PpM*53-aM+MeR*_Ulk37-r!r0TLa}OY0INEpUA5($bE{;+ zxq93s*JggsQ~1QIk#;`lyaup*zJXIriCgr`x*=8pyGdC~h7^u0l-N+B2<^#2$VqcP zvhUFh0N7&O`Is?kjoLW&+87YLAqSWv99hHA#XURBJ-O5)y3{=s-6M|8Bg+j!oHRsP zw=^6|l7fkRMMqi7$;w)$D#L}P<$CY|M1flxNKP^B#G+S<`OxJ24k*SWg|t&tYrB-? 
zW{Dow^nqAF**n4k1;tS*d6fK>X7(6h7jq&s3}leG+9{0 zAw$TQbYXlM3Vo2_vCnB0o|rl| zTvIBJz6|@Orc-#+F1^(d!*W1UB{rE;`_r-X#RTSZm^t2GGQEY684MY)iz-&Fs=o)v z60|CzXI++58biO5u04{$j=XV% z`L28Dc9<8(TXrv+AV?yaGNzWl2~SbqbvsX0)AiD4rsw@MEc}9Tyxf2FuB~x0$A6|Ji!A(QdhsqoN$Q!l7WfjMHoz>v1~X^8`!V z+_`Kl#dJk;)7+(EDhCdp^K0=a&9+B~c~GdpY_DVFPv62V`=DT=x%l&^pMbrz{(mm# ztR5UeAlffVJU>VhBtq}7HBde%fahmUb8LG_YG}aU;Dp@x+Vr55n4F}B!ltUO;*5~C zvbv6zu(;Biw7jgSilXGsz{>3U$j0b`#B$C25A+{!Y)2^cUp+28O`?PRbgXUxwH+Rp=!&`}1O+oK2-)1yFUimoxl z)uYrVxKWyG)ROLsu%Mwath0K)DXvj4On#XXH?;J_83dE3v=HKq1XoD4=9Hb$Q;KZ1 zdd3+E(Wg`i0y9pQ$VAb(B=x2wC{ygrdMe4e`q+e1?}1c@f7p6X#CVETr`!X4CnO#? z5mx{pw5L#-p_whDsms9uAr5hiy=4^Lg{KGWab_9L?oC{5rtOpmn1g}Ft#wSt_JjK< zWE(83ApUq*_&cPsc%h0sV)&iQv|H&xfNvj&deJjt*`~N@#N4^ZJ+*7%#rCUV+`?0oFxes z#VA7IOHey}rEGLe)G29uQu_9Dq{ti3MQpM5XKgIwJ6DqWgPhAPM^M#~I&xNFMufp? z6<5fE{{-*~w2^7v+~*f&WDg1^+1Q=SGourJOtFSw&g#q;kPED@!yV8%m_?BIx3xf` z&L*0h*_KXs5FfZ_uKyR1TkH4cg;Qg91~G{H+5no!cZ2>ZM=%GYempSRTHTmw>Z(Z) zgu?e-Z#_*jQp1!hFS6MX92`e;5^~37^9TZD;%DOu?+32^>>ouqF2QvLS&oD39c}jG zR%GLB=g7*1>3FAQjuQ`|+(78im|DwZ!Zhu=;TVPk>-rI1l5V9E!~PcZo4YZHuXJmXS&w)mN?gKZXn$81IO$5?I zL0YHu3f15lgTDAqh3)|+QEt*MwuGYYODLO!S5(XAbF-T|$$`#|#}2qL=0`jQ6X_3R zAowK&5IKN8Ukh~{tJ43(AXSHykRy~sBvlk}NXnP~sh}4tpw*lksRs>{ub{wZHkmJ# z=!D7Yv_G9LmG1Zp2!+OAu$XQJODL60rL&lA2Z~6gR;f3cZiUKdHD9eZne7A!iN)p& z8cTD;5G$HZ>$Ex_t;cA&UGum<9bu{@j~C5UplVwGqW=MxsQ<$R?`1?v^3^Z9(0SPkzN7z`Gp_255- z15)WsMw{VEjt4Yq&3fyha+Zt#zNO7bHO~he4yWVgU>Va1t#-TP)o>Np3m&)U{pC;v z+YPVx`~B5OP58g`*5IP##^}myzrfu;I==_?{L?Sn<||FHO|fPhzK!Oo9e2@ZN~|L+ zw`mDEg$s-2+EkZHGhpnsLDS~iC8pe`?31ot5ju}GD&42dm99M*JC6;n?Wf!qpIssR zw^cIUr;HgHh9%|&%)K~F)B7|((+r!~w&M)DfDkkd>xkl14cm|uRSlb%rezJgpcvLQ z>!_;cx=2)OBd)H=;*_mMdKuCQYct+o-4K@Jx@HsC^}KciKn00#7#~D!Kq1CH%nQeU zSPK{w3WLpHIoS%C6w5vi(+~`S{6~_FCz@fJ8*O1P{XmxeEO}v?eF6_HK?JPr@HLQI z(dUdR_C5ur#QO?+=RKBLRAbkR?{!Yjmox_|^&tm;a8=?@$EpB_N%H)d!#cY-q>Jz0 zP|NkQcR2)Y1Yr~aeiZHP{p;B<@7XXQ^xemf?2f%@7?!JY!5lCdO^{&WLE<9gLzLvk 
zv)N*?JU}7Q=nQ(3;cQST)k=^340N9RaqJuK+cET=&)bQ-BUmG^1+DGpShubdANl7;aGW9Y+k#XhM{sM}`67t6(K$ARdRLi;RJ zl{V~Rips5R)N==_zUo2WyL;BE61q4i-#Txz#z9FbT?y)}PW3ViwxL>~ z0mjKQuF?u(-UY`YFNuwkz8l)vIRl4b#UzbhNyC zuX12_u~fVy7mo``N5y9k(}9OWW*@i_Ghhqa5$W>YvVIv4Gfk*`Bd&ZWSKsFklsi>J zCyf?&By_Jw4t;lN71}E0(^hv!?UFZ3j~9hX-ZG@Lrh8F#=I@8tSMUg)zRnR&ZM5T+ z?tI>3>#m+OylvH11G)DM`qEhicQD|Bg4A5>3rByJ+cfd42nUAhYcday?&T4W6}Omk z_io_(N(0F`QLv)2;I1D-W0Qx~*xn1SVbJ3TkM7X=$J7!AMcAoldZL@ue+cKcBCbWx zjb0Vu^>SPJ7B|uJF7Bmte5+30MQ5J0zO=`lxqNsqG~lDGdqUgtEvrTmP>U829?}&t=p^X zFgqi%udmGVI=RN{^ka_`7E<0sz9Z8bxvz<6UlP>po)Y{mJPLN<tNU_Zh? zq?&Gsil57+9up#eYjyDNgr{cOeJkQX=rXJQmQ83Xgtm z7Bmmc^!eT_A6}~;H|+b!LaiUje#XbhgT+ty9N&J@_ujK+(H1CEDFsRI>#gz><~4dm zg|c7EvB-K_c!Z8ZdN?#>pB5>DM2C-2|6jRu?Qk3vLhz7LgFp9;2xaL1OFF8DbEEx| z;tI~SCEiu^yw1v2p}--9wDX=qMqOY(j9eC^l5Q1A%ZesX{xFQ| zA%Y$hESfd9d(R#v>25wqJk0-0{|u0}$!vYOyXhQWJXXHd{RQlT*kI;IPR<`Vf49XX@pRgZ9ja2h$IK#oz?;;sHmt?@I~6p^`Yov zcwPtma5^yBKVf#i<57d^}DW{}Sy?13A znS6<4f|>W@1v$}!5Dl*71A76{>bnW}rbINgQYz~l?4H_xv(v*|{mfpKUh~0j zm4?yiP+_cWbjrI~lyFY;k07(k$XP$=ymaYQSo^8h?i*k-%ta!fo{G$?l0XvG_i&%W?PSYWux(ykS_}%|KMp@W z<)&~0#-;knw0<3r3(?4 z*Yk~A<-_*ij5(y=8~wFrlVDn7#5uEM7rMVtLaA5r15}AHk^OrfBAKiM6fgh)-lOCD z&H7^W@_XikL;v2u=;OD87$vSjj6^0~oNGP?#zHsCwg`}XbtGWr6y<`bC6wNJSQZHB z=4Hd`3AY}};pb=k*8^dg-aDA80aWB68r=a=f`9=k_yPFoE)Z%ot#3cMHK z)(#DTfk>>EZ?JNg4@n$~F(@#f`yaGsP_90EIuu$^%q~e%(%D3`sVU<`M%ARjG3-N> z$|{aEN%NnLfUB8Uqmz28)vZg3XRx$Hs)4D4W&4g+a^CV(@-rTY5i^t2oI4>gJ_0q4&m$)+_V~s+!Qg% zQj~vGk}}1yi+vn{+S<7_eanl~?kS5?GRF;$0v+W%3O^NDnqt=#u4-ac%qpmsw9cWQ zvPdmrQ~9MzkLHdoE1GiFJ+7Eg@?nvCA8Vnk!9RKx?7_6bT6!ODX}w|n2*FAC&*ZHZ zkzvJ@<~$qGb41zZoE}l5R)_B#yf)F}hMDdhJ5lk6(eHpi@qYeGyYBvp6q^qL9MHL{CrS=~6qy`BE()|<22ZF%{4Gy3BA zw)~0t;Q}IRBBCPf2_zOc&X?u_L`?9Xeh`D$TESJKY=mkE z_`yj+1g%J&A(ef|yM$y_q@vJyn6u1BVbw!^JZinfn=!lJ+;V=js_ehDCChWin1ykx zuEw@?imS|LA@rwXPp+;sUg^97zBxW@iD=hh*@J?+-d6)tHmgjTDY#>Pr>vAM$0|Zq zl8UOO5lzdS#$2tuD;QV2td;{;ijL5(SzRkWheWRWh2FDEYA3w5-leT(Te+9~wCRbX zyWA@VyVjPKnZ2}oGte_&I&=I|1U2$p1pPi6yp&OK}iH$00JPf 
z0%G+6FyM~^n)Kn>VXK2ic2Qp;z8T9hq@`s`0F<&VMxu>n>qRs&a7TDg5}j;XgEk?r zA@jm#M$!&Y@gAn$Y(E9RE91q;DU{J`=>^k?ve9gzYla#PdF!%A!@Guf6m`oQm6f0* zg)K>*QeCCci_z-|X5v@I!H*{HmEN$WAs>1b^ZoB@cZ4!0mq}E3MIpZ z6c!<4grR2zoR!8(8Wlq+p_6&W7yR+r(b>^2@jfxfu{6=AQLk~kvA(g(@DPbKiv)_K zjD?LAm?ato8+{w~9)&BFtu-%GBA3q27u>(ydtS$1zh6UMeP~)#6_^^I*D-9mTs6E3 zTNYPNKOU_@t({p)FtB5&hSijqz_lnUk(ZS&qH-3e4b|#dI=XoJc=hw#?m4m-dNYo+ z9eDR9TLDaK{5S_O4#G-;X{yyU$wQ{L1_${LX&zIm{6?1D5|nv6%C$XS$XKow;*n z(UxYN`Fdu4A8hjMW{$3h-dJfep2Y;uf&{9YQ&LusL$z1aHV?J8+dAdZ$lY`?M!2W7 zyu5dHz1-M%tz1nU6ci8wK`A0BN)SNC>uy`Ii*Fhq(iQ^0-Q_J*J54W58$VagZftIZ zw#c~+l+KC)!s7ru_7&}(77DUu$asfDA{CU^=`OHiD*b_>=9SCdK z3Hl*~xQ~U4E3J35m(RDf1R3t|YFYWa1kmNFfD*z6TVHs~w#S#Cwe4}tW}L(0_ipA> zABRQexw{|-`rF|QA3FZo)4v~EpXtJl*W=#U`>=16{rmY{W7wLt^ixRa8^?Dv3SVEj zmdZ()7ju9rMREf+D2d8hLt|}sS2?)i?DRA})6v>hlkH}wr>EoOuq^4-t6}-9+v}w| z?EI=2?N&&BXQLvF#!%!py=HAnA$4>WN;Gw3O@P4eIGFep=lyv%f)*9@Sc6P{3go|T z4+WkU31XHjohehcJK0s!^ZmZQ{D)${JDYjx4~+hivK%w=~%&b8TAF;M2z=)q(3=yLeG2(*J0eI_(4NfT{dzIl1YLgNjOL3s2|i+==U-#6lmGNjjorL zk%2|V#fl6Rdu8Qghd0fR?h^u2%rgZ7 zj5=DoP8Oq}1`RdqnH#5VzFm~rnAiqk3BkvTTEgXGMeG9wAzqmBw zJgy81tn5Pn;jsF^a4>-`igxs&hWZ76i5Ckw2-f`D6TV!zkPlL|T6=ly!bu>&a^Wl) zXt`n`8ECp}0cLTxULhRmS17E^t!dk3?Avt+Swxm#D@$GMZ@IagKST3*q{b}C)KX8+ z$A>R_xCmRN1;*QfJuV^s0JmaAvFLMXJa9$RAc0;k|K~vT7(1dw9(oA!4}Rl{F7I z6YVv3c{PWtPBnXf2~V{~1BvG1B?{X8i41yLMZ_#n{$KZZ=-t8jF6i{hNAbkurZ_coZ z3ELc%166D@o*>ab8c`!uRNA!OOOE=9#U2uTv8IINGi)wSyR9fJ_`l2S9RrEDU-u=l zD{E!RXELNL&^ChjDN~PGjJhvAI91rv9STm&BxYu?U;&WBNEzQqReUtl@bEUp9b1y> zl94HhXsL#h{mP2bWYpwC`@s~@m)!Laqs>G2B4#N!|1yDE}j~>b77}PNzdYxbT zL$j``C>9lenC{YmIdL_kG;>5+yjtLz^;6bxb7J2ZPCYF>_Swnm{W@h zffoE%GIRfdL)ifUb1|dbSuqiK(a&lnmBn1GHcRGj{=$M#yzH0ha`PBuQcz|D2JE{Tx99@?!K>3C( z?COjCP(C3hzhfd77@G-vDAz+7LmA^xJzJ~4qMe|4&C+^Tv|iGC6Q|mQy%c$e8YIvN zcu_1^_f`hSNH9d!icp9mmn0e*^fN0`%c)nPNFkNb)zXYM|6v+Z9b!T+o|u?0Gc!98 zRIrEk@g@~I;%+TE#!=?nuq*haJ;`9|sOUWt#(c)xRt-^kqDWp26?I6lR)ucV>`QH| z0B%{eRW6rnBB_MZKxKq={pa90*hUib5Gn_Gy8|)`t*lg{7gPma{k=yb*TJ5YhS){O 
zubtoR)>HJ2rN|c}mqL$ez+G=w&A+>*QrudOcs9GM&lg8iZp}(|dJC^C7dQBBpU9F= zWn&gvYm`r8;@OWB;+Qf@nNYU&^A;yWmFKr%1)^u*60yke3C`xdruu=S0Dn zHEWizn&MMs0c;=xKDU6<%uH?D_=wSmDOQa06=>#dHK zruB3@d<+Z>Iqa4^?}sTiIa{{hLgaTjG6CDF71wz)nZGk?3ECp_iTSsI#_6`np zeSFbI79N&)XY%x`TRu;eZ9#nq<8DwD-ax6TOs(Y8%v$+2TcS!T9U^hkk0YL*AkJuG zr$7~j(A-?@IsAJx*DH3NG!8 z(4AC&8}}|-wPQU`nwQbxa5@Gyl-T;Z zdfEPoLM&GiX{bEiGG#nV@o%WF)=c$-^G&B8(xKjl6=cX4UwX?X{ z9onZt#eH+P-izWybK*&Yp>YVSM8l(C8`@f%QO)>_vS)U z>NaUdNR}?W;t`Z&)m&W&&n`T>^*KV4C7KSm8{3__!m6sK?*4y@Wyz8>SS2>|{b)H`!gYk1?#iFvvqUh;x8F-j8o6*bcc4`PaZ(5y~Y+R^4 z4;wh238#OaeJ(6I1v_m_2?{)0KsdFl2-!u$H9H#1NJwTrxq@_k8{5dvA?;it0ys1K|vv>J($ zgxstXc?4laMUTr^nEnEytd24@ntmm{JHa20d+HAy1SIsM?)w+}8_ea1a^nrrdyOdh z@-bfhK(&?9fbTy)AJsrR08>JaUsmDeCN9c>YZOG&l#%0bj@;A2Fdb3~s4G}tOfHt3 zEwYR=-i4sTxDe18Rty{;>#Xw>Z+wm?xu!i#==6YIGDMP&K4lO*;vp*>Uh$0CMg;tB zFvSR-k%Rw(K5W>;c1dD0rZ_PwqBy=cdOyS#92bMsR;(-(2g!?t&g6>{QY*pGvfsU* zm}y1!yyh#dNA%0Z6=4d_w3=rwH;QL2$QnK~Hy3Gx3D7S`{6ybE>jAqK!vI;)Ir4M0Chl$znD&n4H0ILVjmM`m11Lrm5HqAtm$cHac=sF#grkL#qq#5GK(--$SUSm z;ufi_V*lo6^NGWSd}8e0XY2VyXfEUu<6?@okV|aIx?HQdM2Q^Aw z8NwLCBx83sG(Xo*cnsF(+6iO9PDp4~8PS}QIhR!XA7nUsT?d=szp0Vp>kaS{H1r%PO)+z+m z$YdZ|Yb|3Fo{}x;!nht;+5IozH{eJ$fZ&#&_YU3?W|!_p70WAYj*A|#BoX@ zucy%j)&)wSfj;$E1|VWpNYnlg=nloy4F0Q zWzW*TgY+LD?TV&x0kBl0%q)vMxpkX?Xk=k>GLcP1BUufeuSY`uQJi>JM5)I`pi?L` zd_JF_nusZ?+V^I%GKJ#BM#a*jsRKX@f+ihX2rdSrMqC-yOy0pV(1H1I)0ig-brn`K zpN_dk$3P~BRLZVSqN1f|p2cuvG0B-4>Vf7s8IP1s#zG+@COqm4T3V1TqTOCl zsn+cEVW8j`0N9@33k4i^_wKz(pGS-WTpk~VegVvT#*vJBLokOifUUzp-E=u1e_b== z2Q!YaUJ1*SLqiVRg)3LC__z|Kjn$qGW{#dOU=5L$<{ zq+aue^(qKWK1*L-o3lQaM)}Y}rKZAco}R`qOb!Vp{!+vjr%+T=i{hM-B&nU6zUiP2 z)CroQ$z|Z{R%I0s=PeY8;9u<89iBN+fA1G9O`+eXk)J`Xa8FLU;V1TeR#1p1ov?BL zxA?DK_5b8Cyd-ETDiVR8W*p~$g4Y3{nawQ3%w_UeaM3$6V~*#s$N6|w;1c@O`G(DDMO_<2mKjKVn^Ef_Z&wWk!TfY#I+_D@Tf$kTQMT)5!c1W zTC1*Xb^BO0?>%|p!i9I=?%u3hUc7i=f8CO9bLZ7}7vPwf)7x0Z5I?D~gT!Wm#y@AV zw74vw=!uH;C*;q0!u%8Ks9S$x_Bl@|)}Kf|=LzNd6XxeUkywAC{2NdF20rnd0MPLh zW?)NeYwNCd>jE!F>m%3e^g50V>CKCe!^^3 
z@;onN3>QxJo;!E0_jJ!IM^7Bv+p@tNR~jzf~L);W8$JD78omzy2uvf zh;LsF-I5lFP^~mI6Us_cp3sJ3%9H&fQoD4?1Sz@cS^7&ze_5pME*Jcav)~h~t4jZ8 znu*;f&!0c}GtS0ApaA=#Tlg*jIsRo4NCE+mKiTMR8`YcBZ?fl?@0 z$0MX}Qoe|4H>4GWK9Qo*Ju6U#P=hp$5Ndjs@<>%81zJFSqmNl>B>Z|&=@cn#DXv?w zN=M-TBBc&NH~gPsd6L{7c~iPjwg#z9q{=X@$5c2TuDTWke2^O+9v=6l1S*xgA!9e$ zY;|>YN8oRW|JYwY%3>XguCA^_T}PD4BlS0mT2hmi+SghtqSd9e@ZJv2>(=S70xbb? zeuIJlcLc}^)MjJ91{e482OnNbZWh<{+k(LSfl_G@D5pgt;~OMdjkhIosf1Yxd-i=s zO`PMzgNjG)v9U!M!zdyi6j=8JN}^xG`g~sWp5FZ6;>89yfvon3z@B{>Wgw9o9wRI3 zL}}|T!uCmJI9S5Wg>svbZANC`R$NieWHREW_Aa^IS#Sxm=)9>43OzLVdXBo5#>PgE z9zA;M;?bi<*e}R*s$>p|dwLdYy#xSF+{nnp$e1fIGch_b<`20h@iH2XOm=1V0p{No zigYr(8n3}DO4}2OB<+lEVk%&#(|B4Uk1J6TR6^X&8Sz6kf1}CQa|)F~&#}XuFYfPr zv15;T!Ym#r)5bRZgbI_Y*nVtPC2bLmN~O_KrbG20$A5UKP)*3E@1vUd`mtM(yT`;& z6Yl=?cg@;Xb>YZ^@%v9a?loN)E$G6P;L^8PJ@!O*!{X~X(|z#3(IZ3;CUs3~dJtW5 z_f#4i)1gY5xQ8v=ohaESa;%QLRVKB1s|d{$Q!(^5yli*=yW zQVhj1_=8^k$7pj*4r61CM5tLbpRRs>C}6>0V}1xsMoN5!JV-uKj4_W+VgrUAuQbRp z)WC?i>$njeKwb>TX*gJou{egnP#XKXNQ`=1(zn=<))6`@O_hY2rD-{#ercK@w7fux z-8>@Fx_kFvC5t8~yAlr0O;1nH1;c>noDiPD(~Oxg+!OweYA67f_28_Y*>uSEG-=TO z%0-k?JBkVAw3a$R@AbNx=1^Sg`3u!r{$e$8P~1O?^sjQQekJ z$lbq>3o7KA!aU6M+@kN%@CeR}9Mdt}N@xO`n+(Tc4!719pHJCYIS&a`0Os9?4q|jX zzZ!0C;vntBF8<#TYbE^v3b?I7vnv8VYWv^xvZUvI0enAdd~a9AO3K7i8FVcI^`&mp4qH7sxm9Up{FUM z;*1{c=k)Y4Pm&AM=x07zO=d9%5A8PNaaIC&xt*T+{0qBg$e9Li)B1`a(qo7K$t{Ww z7gf0*&()S!qS5805FUH`UMuq_%C248(p8@0Sqd^awH9*>C`mYInY zx%X(=J32ZwGq$Qk9^q`xxR>l4CWJRBd9)g@zj5j6)weERzIy56s;W34Xp~BiJAOKE)|Wwd9|xS83+U-w1rFH*3-1V`r$96sp?%Pam&4SwEe(oOe?-@gOftvR&nK) zi55*kC8G=Bg=mUHVKC9?JSIgJGxD;U`i9yvE!SUivJoJ;xswuJ2Vn*&W*}^v6f57L z&N9Mm1@;cI_mJ)4^07$Bi&@@>ckhl)qaE?i2k}a3(Vpni;>Va$G%XSTqx<*oa~!w@ zDwDCR^EpVz@mh(e8P0A&=}s;zC&hdj?mu4)thj9I6yMtAi`N{!@SA_}7k}|9mo9zq zhxq%KUps?WcLTohy7l)ZoV*hmZG)i^>PTB~YVLyE+{W_@j%9k>zB1amikO z>eQ*O27P84`%qqPm4~M8{_p?&zyHq=zu8ID3C6&Sx{?lDRe!)>vTM);%J;aBq9!JnBWCZ&Q`2%D_QLxGszN(P0SX9kkZ0 z?zec+|H8>QSjS>OeCABpA5Eo#&>sHT2|xh` 
z*W}i)_6-taWO6=?5wU9#c~}Nah38$$;uojZ^xXMv{f5Y8=-z_swT8Xnlgmi3RL0^A-b84 z+>9)-gKf|;EHL>WGrisLUFy}->lE}76os1g|dZn!BMBH6^A`UV;Q(0+{6&-|c&q^JHLn5D% zsijy#?Zyc$ zU!%pI1)+^dOLQDXSnV?<3+Lj5RX)p(BRhetK_(X+UKypfh$m_WQ&|}W3$(>tMlCLi z+0{969GFUiTyCdk1|4+A!3K;N9t6-liU-^vMhp$%C7jdcXebz1Jxg=rOP%xTB|J=9 zQr905Cv){cP?gPbD(z|xQ8Z0VHj8IzTQpqOg(fe|RhC9W9L$mUyh}=6IYP^%X$7G& zX=>iE<~l-Wq^WYlb`ykJ)@ZR`KDpojvPlvXH{K9|Une5_)_Oz;BIjmt`8g0pLxU`0tLSg|$(UtwwL zCFq79NO&+L$9e?*V1sN(6pnA;bD?jzfj8iX-5XfN)bniS5|QQU4K!U84sEc5BG4t3 z`JNPoK;GoKRr*HS6#P$-UO@V{OQ{b&5$RQ=|F)FghJPv2-$gq3l)i=ZZKQ3S0x#NZ zmMskrDfrBi=Mi2{FjL`+rv6`N{{h%mk?oJ;bGy1^NtR_x?k#TV)r61)0tqY-Ah48O z>Qc7w-tu~XzETXk|JQqO-}cHbKiI+smR^>GkhsN8;@)l9mMrVaRxkh0NOCuMW$Y_m z&D^PX%9(RM=Zsn{aY;fgad?LTfdtZEMwYdyNN6!^uC1+=1lDC>nYl5r>8Q#wVI@)4 z3o`tltEv+vovpkUZd+YVO{KliXfzp&S|g_7(rwtQRyfFB zSynMD$5Ux=NH$A|ETk=Ya3qyV5rL#+O`e#JB$A8>&BSaA?xXzwGC~UDs0b8TP<&5- z>hS_`fI^Q3=qk;o(u|8`(f|YW_|j%bu`FqCPmf!prsxVmU{HLuMN`xuR_)wbw7*5g zimXOSsI42VQG5zY13mKWM)WX%!W2L3@hPi{WtvckDtO8wcAj&gc-p19I35zfo1&_4 z`}ezxFl|{XvI=HnQ$V9mQRJ|6=#WIJ5DNmV{5-wjg7Jbp1=}F1<#z6zdt-^N(h}96 zL~G|po})G5!fkx41%rTVK0S7G3)D?Et*)`G#?#Hq{lY*PTtq~RP$vww@q?BTng-KM zgcnbby_o(s5<*F`&+7?;YxVglK5!wm$W1yBLns-e`Eu0*%QyZ}9v@cMIcJTzOxH^LT##=ZVMj>`O0w`z7*a znFpNqUbG4{f5lTU;BoTgsg0E37;T+Ww9bFc9>xtUZImLk7NM$Jf^Tubci#=Z3v4C# zS~&a~zQuRBw}Q7|jQ$nhcJjB_%46hD$)7TnFCHV)KusEy9|Up3@u)6uXWgvIsi*Lp|sJrCZJ zBDa)))3G>)PJZ2=Wb#VO%4TQh!VJj=Y`IjY)(EXCE|TO#E=|%e?=dma==0AVDUqfi z8SzNA!a|#B7Dj%e1v~D2U}knv>ufj-!OQUzx1G2R?r?*X97Yx@M}0jtN^_*%sab^a z4uioUE(~6xs(rl!Gf|fg<6cmyBhdu4Wz$O5>rEFFys1`Sxzac~N=G5N%}p-6to`uA zrfEo`#&_%h&E5i?X*YDIUnVPD>3xV%>9Gh zhFSBE2(~l-pY+fYB{0Gd;hsHB9)b6UaTLI_bj_fe^c!tMOa~c`9~`t;Ixl_R(a)37 zOdlVLxVioNN#fOn^&Yf#0e0k$|pQJtdhVmBgV^jWbyd%<413SdM^2SnQ`b}-mt>4NGyk<`|k1^I98U${pVW=!>}v=EX&h> z&N?4qn8>^j<^{%mQL`C}n5ypn7A~3KIa$N;i6pt`&)c8pcU7w*8C}?d>V1Gb?yD{! 
zLv%5O%4|kceS5*w$&*uPi55PUBpmBP;v|`ZHu6DeBVWKkxd7S8!BeMRS#2pX(^5-l zsiWkt<+Ceu;|}=SV++0+&n$(jV$vU(oeu%@{K+RVazSRD>9m`HN{Qs_$2R4vFZPPP z6Ply5b4yVS?&qIB*<_ssC-RnCI!U?AX&px1#f0W$Y1?j$=tGUQudJnI)mUqDPSsX0 z%D=a`Kt3WDUF=1W398fQ_m4fLP<7o?F7^~TC9hi_sEv{=Zh?cXh(TW0V;LNkNybpb zFN_7B;(r0Cqh)&x1&C9K!KK3sSdPWAy7xlMG2hGNOD>*8#?T4VHY_L7)bLx#o}4;M z^CvVd8{TSu*%}R(YkFGtN!Cv;x+Rg8iu!gRr{za~-lPNG*0!Pq&hz+@U9GW-wn$iw zru?B;+O5J0on5Nk1z4h&mB6X49-mbMCslYJntF{D&U}?yHH!he*U7GEBke_Q)XJ%2 z{CnRU|AHJ}lh1CMBdI$EJ+r^G*L^|GzlL~Uobv&~;6l#)M<0Rx6jFScvwccPrNR$2 zRL<2QDi70O?%67H$5=EvcE=qWYc+(e)mBY!?;Ur<`yfT>ixUT;ojXUi&U>T96MvS% z)-R97n+b!9kWxCkwoOg7jgAUT0zEsyK&KKv?ATY^1yI*+9VH63EL|y`hKpW(wP^qT zC}#zIWaXk%Z*umt*Is)Kn&uir-n(~p_6B9#Fn{e?o~KR{1{WcfIja`_si9$eLE1l& zF=jF0PuuK6gOmP`J{lS#BanzuvkGoA01YM7Dnrif+sNEpROTF$lMZ*KHXaNHY;8uR&~%jcU9*5vcl5>(?#Isg}=`TJ4e8jVJjxk;yU(!HT{agM!k zaWs(7gTB=#0;8W@VAxn-7UcTyI3z%;B zE-KGHvA=-H0En4_{ZBlr1jT~#j46)tf?eCT?II0G2ONtUlxKf_)@a1_rKQ+%Iw%}U zw-q05_hvqvF1w$8m+q&xT(?%@?8{NqPOiV7d-wdsw)V^Kz542_=ndB{fA-0=6lBF815^G@t2V9{?dl6O-E*mZ_f%d&9p z+|pzq;bJuTvUI)eop;_j-`)EP$>@}0UU{&L6xuWMT1Ilo<=_DH13q@X?O)qI`Mmv; zbKigc+-H5TUGUzI{^hU!>R*2Js!YjU#%*8->~zouuc1adNKqluT80(iq7L_P9GgFO z8meVAHQVnz^X!W+K6~cQJ*HG@&r`?9Uy#3G?tDTPs{0uxod!oWjmB1=IzZ;motv|r zA{+J{3^Uk%`Q4Zh1p{$%@bk~{`@-w5zkXqmw4-xjt5GELCaqe-xmDv(Su9b7sn+87 z_?~?Sp7iz2BoYZ-8CVzNJMR7Z*S~)64!R@Gsw?uoV8kDFtBUd3yJp!Ht;ORx+;m0o zUA&#k7eD^sCm4Hg{_OJQUQBUUKK}Rv`i|(!!vrU@ct>ZsR5Xr_8wPQdQl@nl(M@+h z6;o&Mst)hpw{I8TRb5qC+0sWJeKZgkW#9cfui99RA3PuGP#%ufJ za=UwVFLZEa&ZBe7*0b%1tQ#7#TEAe@GZ@Bp>`)SVuy*wc<--qm>=^&(-~R32J{l*S z%&66_EhpSe-uL9Ja8&Em`YTtjbPW_5q{XS|TyNK>oI%^&t>r%akSiG&DB%VMsD7Im z^1+4DvLxkK!sSacn;svhMpBxZ=#|+Sa@UsZPaP+2@-O6nmHbM~HR`i%qgk4{xf#S78yOz*gz7E% zwnB%qw5+1C%Ij|a&#e7ycNRG+7)Hy6d{gt$g5p@Ay?W=N=9~9#HUqS6qY)du-Qg_S z)`S&n_pVvb-1OA7tDv0P+8w$6QI^wCH$j_yN1dJv27Qa6G_=}7=%F9&FL&`68pj`P zHHkleI3+Ya@Wd0(eC5kuLEAoy@Zah4yLjaF&iOSGpWR4J*Y?+c-FAb$;NQuAN4|E9 zbdfIMYyX8kA@I7}w*5_R_msmvT=>&Jy|8Xa@)z=-k!>0BfZ4WjXTqE&l$b;+f3kua 
zr;@3BTE0yd>OPcP*IKB{4?OWiV3U=)V>C7QT0?ak=I(wvcYkYn?kcJcAXU^DHb>Uw`^S=4!vO4_gzNwMcU5%*gH1e;??zJlU zKcHnlyGA>IPi~fQcKq$%c6hGog2RE;$nk=7DPx7#yl8kJlEQ9GOurXV&UN*lUV?H#4!A{4z4kMio z^x>_SF2H%dVBso&d0q@;jN_GIoNjvRDO-b3HE^R9Yjv*{%kI^h>Anu7--=&za=FIO zS;Kg}HhE5-+Qb_WXkB&#(0iDXnNB+1S>P*{d34XEkQ8eh75-XndY|OjAosiqGR| zYN{z~s6TYLx}>nEr12I^`^R>a>3zs;PF+N|eovp?T}o~Oi$quGFp2`u`PMvxA*J{i zXO~1tQmNroJj=+&n;I>AXaMCJ4D*&o2z;`&yCt_nwORVhg;&~@aY%MFX_rn5rkO9HDQs-?`ADV5wD-h`6AwTA^rQINljl(eFjSdG9$~_` z32PsDM2p=i)g&}YT7!yBFkHfwcd({V1Ct>K51P{pV~|su&1-le<}yN50&>qGXW7Qa zl2(Dw^a8%Z@{q?0e28kJbXO#!S^1H5mA}1_pXg~9JY};jSlXGLL^uM}d*@*RSQFjA z78VR}i2-3e)UBD~7t2Uvi7amSlo;=yF!ADfT7YbvLx^)YYr$YDC98USjmD18FMZxm zxrnj~EoAEJHIhD=!&q0&su~+f5#!QnIYf963U-jWeR3_TM`;a9i+0yCS8rWkeRtCOM9E<%#p_ zo+!=joK$tAKV`?h|NXI7kEWmJ{;<3I5AiL&%Kmh;j{GtBj-z+|YWlzl@_+Gn02uce z8DyS$<~SL|-5>GkU%hJ-0}fRd1d7DSd;_yA2=sEVS`>Sjzy;)O7cTY;dBJp_>xG-c zjc>H){Lct8KY9g5<}Q5t>1X)r8UjDOrI2Td2RN(ggub+-*yo)KaRnGv1tf)eluKhe z=3Z%lCGVS>?Ws}F*qHtxHb0p8VYJnJvQ4Dt@ zg>0khSR`o!98G__b%R~2@vQv2W(!*Z*)VZ6EHAf4>pTD8Q@wEcvY3^Z~6UKuJjCg z1@c~&e>m;t8XM#M%XuDj_0P{&RQ%{i^}BY}R(Oa;7NMJV;2_QJ^Upc{WwPE*kMNT~ zBWZ|wL)P|j8FR$4 z>8vx84|xu=8VJTVrZYj)xn=XpIY<5PhyRwAxCXkl!)zlm;FX*18EIla*KAJtI!)os z=Czm2$_Gmkw#;eF*&{1g5>%5>S;*)ijQbW?I#nzTQk!`Tnw}m_#sqXSNzLW)97liz z&|aJ-g`hqQ$@ImGuc#^+EI&-;@uzMhXUU&s{?3}8I(`$z$4$513FWLiZ?%8(n|6%k zR@o7YCIx+-$z+0%C>f2#b{7f(n1Blig}ZmlOftD?civ8G^x|@jw&&4kziFbTor3#D4^Up`fy|UF*W>IC- z&^4Ov`@pchX?K%GvqpYyS;upv-A4F0Dw7MO+r@T+02UsaJmdKlNhXhr`$&i!Ngk02 z;-a@$~)u@+;T4qvU_Hd)Fq<+MAk=lHb!DNoF&_r@SH) zGm>>YN?O-(HblDJ7#Osghj}K6O6JPdn3Id;qfA3tCxj@@Xb8XQ0!(qC(L~av>X}RE zD=I1=y3EH5sMw2jX>Wzc4{Wht_s~P&bJAHIvJEYla;bLOxp{2n0Tf!{f!;)AE8}3O zY?%{e%vs=MS0Z^JfH?iqorurt#VyAV#%zW z5vX61Nn&}#9xBVOspdSwavRE&C$x7PtV2FHp}Jb|4fz&iW2j<%v5L_Y9traC4$uY8 znwlD?rsLY1Z@zhL@yL-yVwV}MR@QDa1x8^`4=9hY}4kITblS-k;^ndestc>0OS z*38Wg+w%idg(Z--+J|SogJZHu(iKxx7K$WaiV;l1<;%($2k$#GF{8_AWoTz6&YV5~ zrbA&NMT*#$6*S1=;>3zchia=;C3A}1uH?#j^GbQhN=Y*15(She!d+||4=@DD1_c;=aBPHe-rRZJ&i 
zyoS<(^YgMgRt8zHC#EkebCVU$)_usU7F*Wx=6w$iWx%=qO8Uqxo4V~Ok~NGHO5~{)oo8fWhJX_D-`ad>b4;;j_?b9`?Mjd zl#Ak-_4;Ic5akoZ6DNkjS^W6Qu&h3M^ytk8_s-4jwYWIFK9O)|Y2@4tL*X2fkj1vE zAzjKJY#VGBMqGS;V^7aTxv>4n5w#7Y)uwL02A z`q^lVIyj`Z5MOm{kKE_Ngh4*XLJ)q43Fr7*jd?V(`ebSXUNCfO6`p`$L@OQ@#nsLL+!9TQ**YuHac`y4>*kI`N53)dB-j;gkIt>NfVT&V7oKm5Z_Zn(?( zyIYBiEa1=eU)pZX%K`&JY|Aaz%Fcz-V0n>`K8mc{NqhoMU(qr09r7KfXycB8d4PcY zSV?6{gNpD(l3cw-GHyq8Xi2@y6z3B{r&y^^(kbgf#qaO5)SNI zpOmV!baZqzxmB)UJ#DACH{O_Ahu1$RyVnBtiS-z95trV&4!BQA6b)@HvI^f{;R!ZV zp5W;BzBl?sbnxr4dkaF?srj{E(|i#z{G`k<%oh>FTgf4J-qF) zbwq!-wT$GMn2jr0i*am&R_yv^40!0R7BOp8)fURJ)~#2qjk^CUdna1H^|of|scz$+ za`Z$u($K0BpMIL`eL*BI$ZjyzTi4q>XLi?{(Zq@1{LC;=@}K?S-~0OJ=OfgHKCI$T zbyF$E`20MBDM7k;@%?s%8b*>BhA8dtqaT_scTY!&AtSmlkmz*x<<`1@h91~Og+Qe{ zsEnef;-;Has^}mH&Vi(D=jkV&c;enY)ztwAB&1U(ns+qqEaY91P`I;cNArnOvgy>_ z%{DUiDLuz)irAX(UPeFMl(RosvXImpVXRjbTj03R{74@-iGu_E0|N_O|L0sru9AkN zD^ZBK%Y|l^`S>hWS{Hh?c28q$iV< zU*%EqH|#Hq=;&@)ljhXggyDzpK$_;#LBsIw+mC`~C+P{cb%W;EQr4_-H}u2$rOr-C z=;#p06=4;wB}tNr#tuz=-ro|pg8(YZqyzVJ#Yu}A0 zzMDC@L0^r2R;|ySd!dd}Ntnh~z7t%UUFBe*BMOy-We@^Qu&KXniL90K(~YP0T8Q^^ zbgR$3#Ikq!1S>mXa1o-zCMZSH>2yzz7MY4QH6ggzD>^ZeNJ&K)=-NW zw3Q~EW;w#C*eRei%advUKwl4DhLV5a$>$=AoTZ%Z5pO>6rLX?RZyY(2B!^^UK~t^M zVP+IcbhSYX)1^s+wa%-N(rQy_KnrFdlVcFKEJPLt4 zUZ=v)^XbYgmNEvw38tj^!7uyf)g{fa#rLKA?>_^>11ApDk>f}@ufF~!D)6S z_l8I4Nqy)0hx{&0d@&k|gp?G9MXnB3!r;oRy-ZdHqjG4#iCz(?r4=7+b*GI&*_Jh(Eaz{dFK9y z?mP44haPy~fjjqCk-LzNlwYtNwXQSJ!xDQZCuQBab7qr71xFeKpWb*Dh?d&A;KP2; zY-O1kp6%?o-s@Rf3I+m!P+G{x(SLdIz#!Fq3vwg|L_s)}NW09Opr(hO@mH_T#^4eu zhLQD`rc!2bw<_|)&;UIPM1>Kobvl~vxNTuUEW){?XU^Pm_~>mAY#iB9!QySD3hGWi z_Sj=z+F49)M$)=`v({w}j19Fx&3(>l<)9e65KhDrvi^u8HU#9-Wo&91j~sDtI9;fy z5}KmZ)6t2EA`*}}!-4(#Wp?**38xEP{z)|IaNI;CpjMfSUp{wEX5SuPo&z95$AuTR zUqmz5%gU_y;?t=lMG1Na2Pg3rN~EmlzWS6Ot>8%+aG#f&!~J}U_E;^5Zz3>~1SK!t zrRCLt$xDntK$Xh{mpm~wkiY7f2VFX?D@KzQ>(YL|`#>>|#*r)*6Iyzs*5eNIg5#ry7l?z!jg*+;&C3{#0DsO(gPAw28S zvOHm8sWitVVV=I=&I1k(ATiEy;LbY>l9L@^V{}X=3kq^A_Eo~*!nia$9HUcl(cail zS(%r$4Jf8!0l28BDa9O8BECcYZIZA 
zwkmsI=F<4JYwjkSlz#N#V~rN?oM$=`3rA4Xl(uje)T?(kT7r1*3&x6l)b{872WrV} zNL*c0w;#Pi+uP-VmOY<{#F2Pxd`dR%sxhP%y0Q9QnNMh|cI|Snw~9+7YD}CkXUPQE z$D4WmyAcX%BeYc*n+@}96~<@7rnd^yWy9vT3e#u9rnU;>ZjhfU8>ZYK-o$@5O(`3e zB>9`eoY}C*`Y>TNP1lV>Hp#HF>G25rqBcq2IK?k$5$#rC+=iOnD8<`y`@w2mU!U&3 zu+rlk)ba5zSnjJsjsuqe!jiA1Vsmn%Wk1WAD$DZ1HR_Cfl%b#Mx4F=)cW&;(@O$D# zLf8M8i-t4Va1MJ#i5D}}z%KzGEgm2lTELa5E1yFrkUaNUHg8q(zT#gD|La@$Yv6C% z!e0x2?H2y|@Q-fcPxBSG@YloNu!X<*3(Bd3e|YP3Xn8hr3AwVskly_YH^P*r+&QX9 zmD^+S|G@xvCBMw46gw%EU)~TJV#dh?Lh}?0DcTs?!p$?pk5Ii)A+}9%eT5yftxMUtWj@Dq)H{<*yPWA{A|AzdJsM9)V9=??<`TL@0A_?1Y$QU(?=nfBC21Kq z#<4}>Xi&z+V4XrsCa>t-j81SB3Oa+S00&kTm<-f3Detr!I72>|qIMJ@2kkwZMavq& z)%ALeHXCTSC1SA$+-vB?GD2L!QY0Mi@24#wlvhZS#J(a5Bx8U`5J?(`QLxhZz5cQ`?)CW=W5fvjqu~`vFz1vU=o3!b{Bqc4ktk8 zsr=#5ATfeW)e}J=2HfaqVcaC`Vk6<0i(y#23fK>}D70-898_;G8KyL5luOqtqzNde zq>ODvE2HM*Z4QT7%TfA9ElFw)xRch6QgF zR6r`Wh(a#_rR-8M1SBxeLG$U0D06mpab$Lc{kUIc36ez%IkiYsgR_0nKy)xYrV8g1 zeVB~s$;yr?Yt1RikddL8C<8qxF1j!>oJ@v7BiFCY!1gvs&-p+Ios}9v)C5uAC1OB- z(6~7;wdPzr!xHR5h)OPX*o|rq=vz*0$SX*Z(o%b|-EK8o(G&C3YEl52oR=gcDrXSW z)S68^E^B9J%{qxXQOF@5?$2?h89{KFRT{#QbV;Fx#C&5D6CvztU3!M-=sV#%yHmw-E9OEo4l^K)ut6lz-l5WN7!Qh|>7B_f$nbCX1t zmfS>gv4T$Jsud0S7~NKr4WG2q45KnwQRjSv3ipyBANN)R9qKA-N1voQj&-S6jt+UA zQt~#7LBxO*4H!A;h~h(2_>@RGy=vq8bOw*Xuw&CH!CdMn(g+~W5kC=kVQdRp`Z`jJ zsK+7%9crGW7SXBrQmYH|0!g_r{LgAf7YTh%lX-0hKFO6jEP8fPSxk!@<0_C0dJ`Qp zTD3q&z1B)gof$uB6*O`&9GRt9E1Hx?k}QjthLl!b+R7~20zBO+=fP42AJw*PC&&(7QkPM{3E$~@Jy@Fo1kwAn6QS9iLkiqzp`HqfQX{lS#D9VWw z`($zeUbo)LClVXbT6Avj!Z5eGxrGHfTEWj=e>MjvG2nF)>)GrB`{ni4GGi2S3h%?vuAJ zqPPl5%avC<9J1sntSGOpzV+7D4fdmZI@^&ZMSjOZ_@=40a0#{uyIgA_n*bzl=h?hl zPu`70k@T#85vkH-`TpUdX=>1NvVXXry!&phE_dYS#7Z`aeZMG*ixbz*f5tK4*@@As z*!XpHTx`2^iDhwtyg)w-vD!RaC8*;9E{(CGWC%x1w}Unj*uRqC}!dGaNBNaFiG9y=KV^tE<%EJj=D-;OO~L_d1Ph zqE5Wq&0YJO*M`X7%fF{y$TKR=BR7?Re*C@cb0s<1lEDHq6$!!OdS4)nO@00(-+LR|?h={R6_VlmhpE4)lyd}F~(dNPhH@AED$cTI6 z88jX3v@Kr|7N7eXHBs@(`f$Nw9vdTL2%npI?5pJDa(F)4x&+}^$`}qUDsbFT`(PJ0 
zHE=l~>m`r~Qb7%D9o7_p*3~9VWji20*U0pg75Gb7P}k$83ENMxg=O(q76 zL=Q0nK%VOfs%5DJCGxuH0Nni?!Ejura1Z2ULk>`gxxv`c)e~CeIBs!fh@QkTgJ}HB zymu06>%NJ}$q|<-Fhya${ZoNfM>M2>s{)&R_uYNhsh9;blLgYylaPf1XTWQ&j!woz7w_V|C_R>GGWLg zw0-LNlqB#x7nr_s;d6{`uXn5)qx(Wv_m#FbqM#Vcbf(tRbd;;pF;38FoK)?MO$)rs z3M=7SV{xI?Xt9vh_GuUypPL@MdbKC+IQaOJN-(Z3*>(V<{lwk(!3^Js7NmjJQ4f!L zddRwQ-_H69D;FL@At%xdCJ$RG8VDE|ySJVLAU3qSW%Mx8yC$A$ zdDR%<#@RswVI?KX!id2aJTZhP@)VA(?*AV@(ZcM^Jki3uNmhH`;f%IIM_VW45?#Zy z+zi?~>n^o*{P<^W5PrHqgS$+|(#3&`EAF#TeXUNc9|DmyMw>%fVm0QXa-9YoxNx|_ zt|3;rXsGXc@8A&JSW#(JRaIGGStY(oOQwg0+-q^z1f-7VC!;^{U>0Chk?*J!#e4UY zcY6W%W5n2ZvSl@`oECYV>wNRgPC8>S5!G20>t~<&>Q|q^!)_)f=34*09L-uAV^we> zMldJRJ2n=%etq;h+|b0t5WeV-2zEp!mZVv=$yVf;_IQ;j)v;!GHtA$tGR`m*?y=O} z#j@^Nm3I(sdJ&R^X?o{X6*(LSZim}dQL&4DA8b)5A)ziE{%>kovHv>GZLuz zx88jFLO2{_W2`9czvajga9r1y7lK?4E*Yi=R%CvRkM>@H>$%?7cfE(+^^T6Cyjr%a zdx>QQkc{!9%<7tUy7E|#M5*mhN0H5>X48b0mu07}!Fl6xFa4eZ*_6NQDBS+KhK9QR z^ln!^mnrX&Be(3AL>8qBhcCSS=36MQ1ZibJ<#djXE}<@b80Fmx>&m~{{p#y2%yvvw zV|Rb)?t5F9*H6pqsF~#_2e|KZuQOfSflXy!Wbb88zwRPyQzQ~c5%e7NH@+(=gZF&x zoJzlg zEA~z1uW*4Dc4sr;VtI{34X<3Ij~_sE~fL@P5Ei_B_332GIk zq9SO7(AEU|vI`bxq&L=B_j_HhcL0iE>BpR{f#juqV{m3cw{`4HY}>YHV%xTDCllM|#CGz; zwr$(CZ{B*p@5lXp`*d}k({<3hx_Y1L-M!YL%(Vv@Z?Qk8e~3bOdUkV_m9;CtCPXCT zSn}A~1YGLeXo|=~JZ}|%X%jnV`P~QwZh?#JcYk|5GpoU15Uslh3!+hoLO_V!R#Ebr zINvM~CbBXTR^^;?6AN+E*3}_y%<^0Z+vw5bUF3CF*UShQbHOIb_y0V1rg z+3{+2l|FoaCxfkIS-9TRsu@Pmc|Dy!JRnR+gsND&3D*x0)+yg_V#mih-5=hh)^d!Y z?x>6+)3TMLaR~DI&VEKKQpujM&V@BKJxNKChwnnadRl)z1T=o%tJD0DGQYWKj0`zf zSVUQC4~+kg%oFb2@O{tt^n@SX84=$K-=`vX;YEpW_dFO;=^LSgz-E(BZQcb+c92fV zQRtlP@Oi&9t_)EqDi!)u|6XxC8|&K{m6VEfShqs8p!H!_do3&M7A z2yD02R=ubKha0P0gtOQvS*5W4DlF~O?}<$mm0}Gc(V;-s@cH706!Kw5O_d2Zs04S1 zn8pfV*R&GR5t7jnDauwU^T5BekyX;xSSPeAVCcwqeXrJO&%(UX-C-O$4#X!PQvdCH zbWh3+Ol?Ud<6IAhuj}Fx&VET91&+Rl%~&2`<+>UNWU!))ZQIc~tWr>w$RGr!-L)2 z%XYOgt8CXyVA)mH>Tx|~BRc{5YQht<1zBKZcE!8o{8Ct^8{5Hl=ymrmuFT7`U+M|eDUNq|JpH>sUXVb1aXciU0K+e@BrM$Cz4m#fu2G&|LH3qUkx#+U(>4@j@3rbZ!(E2ny2fDlV@{$EA<~BZ`k2&}lQQV)<>6~70 
zrOn%kKdZ<%b=TfV8-|OBe92-a{bw zuu7jk5H_4Ar@j2AXAiuU!V}YOzBAEse)_tM)6|$Vp zOAwbQF!fS0Rp$$5*{k;0meX09&JsY8aq=a~4yH$GE=y}K^t^>|GYhcqcMW0&zkb!= zmMa@^o#3Sf7WNRNwebh&0ozR8LK1ko^Xpr#_#OAh^12?0>s(F(9r4~RitXU@D=_#Y z{U8YOyna|Kf%gXD&mj{mbQ^)0m7<&|`XU&9D^msIo3x>V&IzDDc#1IwRmXaKAgQx9 z{?P|wuj$P{HnFk5KORo8RPcF*!v+)c3`Hk-WP^x;d2@6iRONdXzME zBM{sI=}2LC7yyp1X2!6oCxl^iszYyF(~*kC1S=fLvBaZxbrCv7XV#2C1gc~T(n;Xz z+5ICws2KxrpPE8ayVEg*?&!+Yd>; z%7(UQE}{YHn(}9RKwj9GI2=*m3VLa|yA+&Qb3fM^Lp_>FZvr!*2(8pmpPiKLm$g|fElhq+JDd)@N3zpl0(Gnk1o zca7tey(WnlX&lY7bF#fJzDw#Vx6{{|HTy{qCX^w% z_c7csci8eV4iO)d;G0h{<#EV0#bjYfJqFzh>#uc`L)~9MF8l-pNQ2OFHM|bvl}m)g ztVhGBuCCf~V`kXw@0F$)7Jp7vv|d0-$}D;khVlt_2{D9_ae3m4nCQoyYKDkM#Ya9a z1(Qqmhd^tx3|~0c)iX!V5Zw(QAMa_=QrL7B7Rmde8vBivh5HlMjnyej>#?t0q6vQo zkgfphGS&fhTY`2E%|9oj#6IeEQb(mhXNv$JSS+8#xFO zed`W+v%+a$<>krcWhhg2*Vb0dFE=3%V8#aULpJ#Lo`%h3c^1HDw%ge`1yCN%Mng$0 zrr~5l#-&%;D2X*f^k9(**%UHu#6ttB>ZgACEIe#9vyvjQl~uW91Y%xoVR`XTXW#gc z$YRcnz^VL{Z&RrdCj{xi;%{4u#3FRV`1F=PLl`(5h%%%$jD_`d*JF(J`KOX)F8M^zt$pw5!TXe_&Dx zsL^d2-o%86aSlz@4FF}Tr{~D;Q>SuK|jx_`&FFWdue87v#7C>u~L@` zUT)e`?YiE&U|^$oB%rb@AfAsebuN}McBkDac z=*%xM5u+5SX-b<_Z>YQTn>o1`eqCF#Od90`ym#c;I6dp@hH8U8pOhD`o!^ zeWrKQ!@HO6ot#jzfv1romiiN6okbRabli~v7YEf|8J;9*l}8OOtHOPf`TQyr?_Tec zTU0neOb?zkjNe)?h5n-lG^KVxhK`QD=YiI4*SQ}PA1)#^C=<*7cJdh-ah4H_$K%>E zCCWvr3Sqi0h49yERUhpGR7Z!eU`v0)BshG(tV_=CZ9Z2wGd4UWA;K|qvgi0HpC{Gj zDJ?6K26o+YQkoK!6PD@qas3GNMm9f#DhDLF%g9to8VP1opKJ?%!Gd|R*d+YUr~b{e zO93c%_y|J<{K<_U`w14cNrUVqbc@G~i7`@g3JI9fUpT-LkeU2-j@rDGhuBZAU*eX8 zR$(H6nnyx8V5k9ey=v0loHjmtQ!K3ivUjY>Cov%>E8TN|&&rWN{DkBR(H8zm==<(t zAZ4>SaAJsQvLq+>4>6Lu`cA*RE`#n;S66P|JMx@GErtM}_%PK?hrkv2KZP>|kYN zMOfa-uH$&OsB~)89oIXEC3efNJ3qGIq9MZZ`xAlh^=04fnp!0mVcY3hmx7#&58KYS zoMV1QlJ=519MbgDAw)xyxMK_AU$knbY=7mWOk9OE3wGfWnigpblta)|HY^nh=<+`m z4;%f1Y_}xB1=zqAEFv2XGRo9}u#663X^MJF?rJKCZr~CLo<38jmcUu=KT+IGaI|X9 z`Aj^?Bx0zB#Ymx{I>=DxdA3lB#>sSS4$!;qN;J$G+Cj=U9}m{Zi9U{|*v*|fJI&6I zvfuANj$dSa9@dBj)Wiq zVa})!t^B3rsxrja7dD%DN>N>ryjv{w_RLU0K>@fwiH9;l2%JPF(P;58rjVHrn1hXZ 
zn2{u>HQp*rIy4BtBKgqxo(Lw<9tp-ji7sDS9}dJ-lxO#Y5%vA@PSAGcp!RR4gyG*M z#ui)L+Hcmw*@d;V3*=uRk>h=ocDgTk-hMuiQjUpXs;c;jSIi+h8k~qziBD;_I_6yY zkoQZ{N}C@eTgCKEaacIkWCf@S75U$DH7}K;tM9wM2gAlgu~nH=^ShL1=vEvxb&*vV z>hH~3Wk=I}Ftw;sMiVm(hkH|kQK4 zCX+g zHIt17W+01jqIK}_8ro@oAVIQ;)8(-s)|TJr?dAzN+EnP%5gCyaO~ClyBTnFZ+BScg zXKtmVgA`OR?6bSI_7swWtCWxs1Zd~Ro16_mPK~?`Ivtpc$Yz@#y6yS%d2>9AOFO6( z>o;e*eHsyx2DZ^_dGM?yPRr{Ib3S=zxLS&>CH9%~QtaENv5)jG{pPMN^CVK^GEe8c z2(w{xX<=9hBPML8#;sMZ1!ok)YJu)BEAyQj{8Xvxt|9yA(|Bs&IGE1*p}dnbGXm!` zd~elj?b$Y}sa5OwdtOM>Gs#aj6_QiYm{#(*n3x8f#MzTvANgbN8x0CBm$M7*_MUOq zOwRZ~n!AXs;j6lK;gUV&woLder$%pT3Y9msz8&HNd1~ZH+P9B+wRSEl7`~lTjqLyd z(z5qz**6JVv^xgKNq43h^Z*)zz`MTz-bOiCA>Goo_Ar^Ux@iu5Nf0XMoKPd)ome9! zycH?|aJWy}!)CwtsqgQhN05He(NapL4eI{G1!QadV-SK({KU)k&ZoRb`P(yRDNmdp z6P%RHsQm4Zcsm&lQo1KoLWL^3keMa#S!XDN2F7%OH%xpjRic5LFnNb91>GoMo<@1J zwXtimYRif#kA9R=!NJYUeyOL_N-XB!kO!YU-moexPp}p2(GtA6%1PV8eca*HyC_Ic zNB_2rUMC(EY9?0qG?9l(nLnltLRRilBwxit<-hM5Zd?)xifR&|!8k%w&#c|(=KG}K z?0NwMIe^F~Uaj&&sKg{KQ6?z48!ub)=j0Q&sH!E)s5IK4ZwK@h@q$I8uk4a7*wPlA zW`OqC+Sb;U*iWY?_-gMfyyXMb;% zqft0L9jNlfdUUge}RIgR4JD0wg^N@h(qC!?mxkV`nC3cQcp+i!n88O6qL zCut3MU3Wg`cqM_SLNP%cU=}aAaQk3SvDeo2B#YF<5e_cxI*GecCQ)4KG#MBQegd_P^D&tA0<6fbpSxb2z2j$?+3 zxl7`e0^lB*lQ?X)*Ufj)A=l~k&R`w6{;>;j*`EG>9^MaWyClVzX^qz511*TKIj-JR zZz9=0VR2aldy`I5b11{)!(~d5gwPJHsf%*yFc1z1kE zN^;8RdKb2fRW%$OmvK58w-fEPI_`c46C4j)-+pxv zf2k5|c{9Bjtg;@P#d}IwQ$EO8QAO>>DQ;fgeJ>Bs;mx*ZY+~0u|GDSX1y}DE-kka8?gO70L$=s<#5OR$?|z6#lQ<+pd#0O zmo(4$(V1+>O9$w(guern8|41!Ml%L&~9hV_5ChmxjIwW{W;$KG2ZRNgZxGRit-j}=O+3D zU#;gUV+8o(SnJfcX}1C+7je18RIgGW{O$u0=v9JaJR5X!8Wbjz(r~WsouP)2HkHVm zOR>3@wMR{(sVPDANkfM^Hl-;wpuhOF6w3TVS$Z&K4v6m=k`Ep-*{n3M+2}iDmPi-O z6K|9*uWU@D9Me!B#BJ9sMMoD@^dPfU<)=r4ShD;`q-Lp)Bl`u(b}X@fZ%enQtfI0O zOPLx+Au0=_{k^r2y?BN8+D5mI{{eaJ3nYtN1w=TOKY~<(qIkPFfq-ABLJk(yIsKF% zGw0FOUeI5eaYN$f0>V?29c^m1AlHDPPuzmqvYIo=@AK-Ybsammc%{N)yQrMm-LvLU z)XyCec)grdsC8ui$M};rLQr+QaM9RC*94|`SJq)kDSd9Ua5RbjzV5WMvaSOD0$~hvNY1J70Yye!*w>O!2zT}a0ysLPSnV;< 
z6!c<92ECUSC+7tWZFTho+M;#0YrArmbFR9U-WJjM<#5;8$FCDH_qvJJ^X2Jy-EBQ=Ja=PU8m5fYTO$&n=9ZiJdGHza$40<~8AcPls{DyZjb$T$? zz-teug&EOyM(?TV^f(M zE91n#z~Oj?1N;o2$c39O+O|u=_Dc5n+yv~PTAK7R(fT1wj^2)FquE z7?Pe&Re5PP0;IAWL`8n&xveoNhc&46-%RIe^SGyGsO zCQKu2>5sKMVCePa{iKl?0Mnbh6xNuibG3LsevY{Ap8Sp}I8h-a^rNo+vHb;49{YN9 zB<$2c>uSL|$+&i48aX&WTu0afU3t0fb&Xd-z%N7R@truK*Jj-AEP?(U6B{_+wcL4y zD~QHoZ+p5Qn>v!otS4njL#+vJvR#vC=Pfkk5%O_<@aVQ>vB~JWhziRgajY_trJ^;} z7TBucwmvjd!FrXH*_l36H4&_tGS1wSC8S`kq4~0<%gpMWvR(4=#?iG)yd8v4?zC=W zwrpvT_b^cueC`0Nh&GR* z?bWmjy)K48?diIt2p!Z*&*wNBE&Z%`Dk~VHY^{?!-#KnuAi3uRBbNhw1rjhAmo{M`tfnU_>lN$iPZ<`6PRQk^5 zxaGdsq|jv4r5>+6|K;Wv76fZC$bfhzOF%>t`! zo0sQp>px*k2o?j3#F@R2xBac7f#~2r?YhI!+XCQZh_z#BjxBt6j!#5SP{!dH`SnI8Bs$Eb(yrC~yX} z2rYSEEx8#3(U5YIt7c(y>m`(jk^;VTAuIw(TN2m?#ku5b0?dQ2{Zd&l!yx&OWm`FlCIymY-g6DM6N>3Ra;?`&w%z+>*!en-Yn~9H z^Pb}fOmnW@Jqd1iH~@)OtW^&*8{y*{0+058jAlkQ3TBK@pPbGd9$(s41%&qXjxc%e z8~aL!mmNW%hqJqJT}X@yW+$mA5NK?7bWcz1&T|#@x`yZk*j(KEmHO&Cf#$AlZHV03 zwU$Y8xvtKBuhFq6H;MWj{DWw=vB5EA4EH$SI1$%lI2NTjaW-v`Jx)O`A)s@*uvFe) z{B!b1j;wn0m_tTj1{|WIg|oAn{)mS}qP4P9E6%Ken^S >-Aun5A4Gp>4U0IQJ zJSDj%uq;_-j;8!z8*BN3#G5`ojMF>mZtK$CmJZ>LZBP#+{!QxI(n!6=j?D+5s8yl| zCqq%@Li|olF66yc&uRtqxK_{9<1Bz%WM|3)$GtRZvu6gM<72a@tfd#+V6(pWfBD**uQxR;owP8FIttM>^4T=+ zFYN&$EludBGthdY*q;-P4l)cZvz=S2KfBDRiZdk$T!jv@&mB^%V^Q1_xXKs?qV=+O z7JK9WX_6hj5rQ5#_#XZR<>aHdT&e4ifAZwWse0~aHapMWG&cBWv{?RZ`hEHB@_nuF zy}fbqt#tNX)bur{>6ftehFiZkNd>Ryw`lrJv#{N3PTAXz)`CuJPCB~geMIozQlm#$5l!D;X zfUQ1!IFD;IjI^b*Mkgk>MUhTnv4a>qY7RRms)c0?WH-vw-S9;aXwyNe7Ta*5``;;g^I(Vd`+I0u7da=e}#F;{J_6W$C;2b`UBI+E~4_A_HQQ5 zEQ&p-|FvZ}rahkr&RN0U9c#S3P4p`5%G$~Q1Gow$7~C7M`U(n zH^FiFC6R_ryR#`dH%S4ZDE#M*I!7-^?m}M>oyQ08|KKpz^j+15&QmYy$Q`n%QO3zYhIp< zL@=uru9zHQ&p+^Mf`TE$N6+X3DXHLFHM7ULndU-NzDCgbzO@DRYM`}{g9Ucx2d0wT zg|vXtmgY(G{#9P|@KChWPlr8W`g(H1hNk~a>J&0B02gHsTNjj>*_i%Cgna)s>-q)} zxaIxqdlH*u{aqw9fqCww89ikAvHf?Q$#we#8Dn1}a=W$}OpqPy5^-&9Avuoir=($k?pgH2#cR*9FeVS_gLRc7U0k+2y92<1`CP 
zAP|x#R&QbPF}jnpTfaTSa3cH#v3D)=rS=>G23m#FFV*t7k4bvAKuVE8{3!#`2WN3wo)f6L0KwAkO>ECG`!KDm9U&Aj#-xeF?-Sk^#N4MY2 zU*K+D^9rFIH3hnht<#=H3WI*w_w%358;ibQ@gDcbe2?DO{khi%(YMbMP~(*oqXD#| zcd^%2_HY!2T)|3<7?dgI2@9=B zrQ>K)@X=?cYYwfUkafI;oV=Cl_)4^L)F~LK{e60f@)nUL_9PX7=P} z4(!MF^v4eT3Q6*RSm+w(M0qf7p-4!W{W=i;s*Nsw$amYf+IzTPq>erZZ$br>9Ku&G# zQ>k{y#@X0ocWW8vySn!eNXe`O3Y%_3`aNctsL8LKLf? z?6Zw>jM~rIAuZvY#F}!9x!2wyPHmY$t9Fb&-`GKKZtd5(a>#|`JwQMTK7EN7xJCFH z?SA3--bMO8tizXeA7jb64@jMGRAQ`)dyb1xr!5igNHU={3!alyt;=AmJY-u{FksRd zKX>P|+llT7=eS4T8e4a7uDcqQW855ncNZYo3G@y_xJTk2gJ92)L&;q2Qw7vz<6RhI zw69j=^56RYvX6_shj#K6oiw|&A4v9{sZgJ$*|?6mI630@V9j*%BPhV#=cM2qrIK|D zX~^2=#b_BJqjw6f(B9|fXc@G*vQPEeI0i=Wm_W(7i#qPuA#2z`m8LZXr_mU+T&hip zwl-wZS{Y*pGz4Z}7;?O?OauSAbKuX!kzq>kN!N}2zjcsT{WY;-f&2fqYxuuLt!}); zzFGn$l7;uW0FrtCtIWI(Z~-)N;#jTou6vwTdnnBt`K1nSXBWmDFf<|}SXlju8GT7c zDzz2vK5<9i|zx4aAwo>ml>7lgPd0s?QLl96URHi1yXy{%tO~s zB1rNfQ*OVcj6eJ36ND}6NeSvvnD7AKoH&5?A)dpd(bEr_K-F`5po-tN#zPiNm{fog zdTEAB$lHrs zvw2rdi&jvE*CC3{axexwRt7rIAKxW_`XF@}WU&<5Z!0Wu;|bkB=ic3t$g&s+{2=$K z31U7BBzu;|A(UkB{WVO#wKG;tPY!tm5^&I1j@<`TW zkOVQAZ7Fn3%tLi74>1hKdVCHA_siV;g=!pmqjfY@GpjhDBI`Ay&i(cDCaAr;sNF}{ z_kj!Uu;)iyu9|=&`(2GdpWSTTKSM@R6& z_?=updf73kQ0!e#x@RSg&bHodW%ofewxmL3UKv zTMJ+1vpAkWpANd$2jXtUM&UExm{Z0s*l-=Y=Amon3s0XrKTWp64IaR6*IF*$ZlUF& zIa$HMA-IAs1;!zJvsLuuvRVDy=Ijm$-`+)cj)UC@f1XM8eW_21cZw$=l-n&w$;qW9 zw`=bbZ=$nvGk%9hwTpl&c2mBe(xewGT=s0(E3A&8b1SOyS+$zk1YstbRUOg4qAl?> zwUCFwW8|FHZyoTgmud9>M}*D2IgOi#rM=uE;hQPB(l6b)Wm13d4|wPgP?H;qBq1JD zF-T_-*oR@T#)eJ+)A2>XeCadW_4;=!b4G?0~@LZY}0}fduLs=7p)>B0refS&IQ9HKyv$5Pm zG2O=VfCUAZ~&T8i~ub~MczSu)OH0Fc$8 zf#Fc77^^Tg=?-zqya)SOEr4lvciFmRh*NhwJEDl@WZI6vSQo#5X=lF}2BaMt?@+-P zEZ?dxju%+o4;6=74l={_n9x4T5I8M&UM+WK1uU2NU{7;60+}QrnOR9Ut41MqZpz>p zh46foHsXHtJm>WQTrDzft)Mw3m;$6GosoWZGT41ae13Au)u$Y(VOHATaIkeC(3Q&h z>VcPSZj`Mn;h^HXguh5)NH}XsFdQVdb%#_A_OYu;LNZ&5?Ckc5_S}UrpoM7W9e5G{H zH+LUjKRzIQpdf#+d{>tE85lf@s0+&|psOfF4I-zv&4ue#K$t&4(^&sDu= zpkFh5ae=>o9qEGs20d`c@@}}I`WHt+Y*%OaV)k!@w9a^Ccff>gYVJu5nGLi0%Eaxl 
z&4@=evMRjrkBM^cx%8ev=mjNp(JM5@4%^i1gWr<1!#UL)ny%Qi14)}Khz>lf)f)cd z#7#$U1fU)wQgLlm_!2yy^Y?&;-4P-XPYLlBela3c2=tLy#@u4wd1MVQ=I%fT@s284 z%HFf)FPIh|;ZB!vP2Y>(f-n$HMRt^yq`E^xYjjtBQP&WEbmPq>zVN&dnc(NpMgL^q zza9tZX=1W}Jsz233Ho}iweZR5Q^J14W3NT*V z&7`Y7z^4H(?Xq-rifx^#A)EE5_)J=zO1N~}z2}3DO}ps{3MJ=d-9>`_W&!#6&Sj7F zamHoZs_&S!*u>A%ER(KDhZ?|G0MFsW4r)OZS*@P^qaRDCoN`Ex;TKsANj{RI|6>|` zri8nBpAJfnX&-F5{c=#rif)dOs}Tq1g{%_YXthK!-KoV z{6mExa$bu*P!#;cn?y@l3HKMdUzfn0>5OpwCm8Flit9&qnU7EHQG42)JnmZ)(zdWQ zn(qC5G;*-r2sZ2VE3R9B3eUidt$(JwOhtd>EaX+O;n*OUqW^3hEz;-V`1~9Zv$3Z%2oX{`zyV*ZFoG#P_kv`siRF*W_g!otEmF)`6%U>cM7b8UK*-Ic(t z`NMNiU0vfG+qKR*&yr!`h07%UrAhyX(&mcoIsJVS^yrV@Ca-mQX0>S)mQ`^YmT7VN zVNGJu5!*d?QR^@Oq7m{9lq9WJQ=dWZ7X1e821ESUNV+1IoAMQED_lLg$z&KGl9z-n zXjxeRkdZVlf{b{?pL03 zQ*!BF198koVI*OzF)zBmeO)epNeN`$ehx6+x~2KsXLort#=Fk_;g+O$FQnKk3Vlf7 zpVNa_dGCm7c(zZcRWiw#sCP3>XMi;hr%gPp7gRm_eyvP|uUB9nRb3@tHwnE+>U8Yc zQaaS|a!X1*F!2!4Oyvcvu*rP1d}kt!5YAta^C7!oG+DQFmP*Ee*QJ zJQ8EpEHes3HOfI4kFJ7q|x*TFy`wax^-(b+5A`^^82E0<*bsX z-j?}yIXsACCY5AP8IotnI~TsiYU5&4emqafJZnP=H#V198~1Z7`w$g}Gp}fC_BcUB z*7?Wim_qy6UW32J82DI$|LWNGdltd94axExv&+@uL`aY0p;UIaU~AUfGVp!Uv?4vw z(U(>B)^E7*ZBhPwJ9Gjg!zQDGIpz?HA=GlhgBKc&<=W~cvU=t^VwXoBLD>#BSu{E| zi}a)h@p0GgMj0!IDnJWLXTk?QSu_9CWYcH*hKY2qJo-M$fnp3TwLQL>!Xg9OtDbE> za8=rqhm?}bo5;fv zU0{?;@sFUQ1PrMZeO!p*P=~=*T;{=1N1ME2@D|MVWTF15zQ`h3uU4g?Ua(ZM@b2X9 zhaZhP9~vZ1fJ%#Zi)O7+OUCDi9SnNFeC1A1p=$6rq#M3kDWf~*i=esSP2fHZU2X2} zcpt}y9*i&Ahsgfqm-l|2c*a<8HH=Q&AGhF)&@*(U;SOkz2Fdapo!v8vQjZoRQM3@T zqVXxE<0h6yewonzhCZn;fmJSiwUc1wiz&agR;S@@0e0Jo(c8jij7?lVZN=bRnC`vg z=W-Lpm&6-4DiOV#@}JfU5a*ph-fW|`4lbXbm_39hP$`0Ud^oSZ#aASh<98CzeYE6r zh;WO-kf0DZmIiJCMn8|VEe3(t`eIJW6e zY}1hXwPkhS7-KH$vwZzo-IO0>^d3zI8biH(%6x5~j)xLs`UK8Rl?$2`F1l7DnxTY} zmXsEJXVc?*_@{bOXl!$#1`b!XOKN>V{3km}0>_rb@Cz7!?ucFLSfMPouHnk?x5wUL zX`VGNw;3^UD{SA=kHc|@6rB|yC3!;OrEcGWv4VtHI4g@4##`+w*xX9GusX_`xyUMt zksR|DcXpM>h)#JBGx7gaPl27M-IB+8>-ipJQ8Z0?kmH}=Jz5_aiB;(g@dt|d)+3R7 zXsez%aLI`=s>N=J^dQ?5RODWZ{LGz_re&(YJTr+`t3T;}2yLTQtRl_m8sJ`pSs>e4 
z?mD>7H#qfXGPGQzqiqhdFcx14^chAee!tQ?Mo0f{)M=QS(jHqIS@aU|I)QiOX6LTl zM*yxN$Ni>eo27sfpQt)5_0rP(*Ew_{oloN*obq~cUA`MVi*=I46*cuU>j#=96SX`> z%rPTz(FA3%xHQnen;k(NwKE61i+;bNV7(K25_td-@Lc-7;;B`ztagmRGkU?+4|z)6 zH|14o%^EEz^JNixm7Z+YkfS)V;d;QR75_9H(*q_b6_9+T)35W|n?m3-Az4=Pa*$U{$1hr^Z!Cz$X*WHAbO6o$&C$H${4HGHkB%MEI*-t zu<6pAo8MY4q}RQ{(O22?Or+GML~y5eIHCi+(PhfX|ES!5Zu+7=O*yDOwPWi&4kPMy z!z}TWVBybuKhr?9=Q43d_@EtP40dv=J)&W|+;s99N%$p1kO4QhxxYL28=E;mp|?0aB56{dI!8UAfElgz zXR#B#DY$T*!>Cnc$e41`L}6%7mEDvUk|pJsIi+hY&`QZlK&+>wB8bh?mV;Z@N&|xX zYs8T-Hqod0mv`l>(n0gVrhDRatwsY3YX#8DK)pjZM&-OJMunYK)v_i|V-*>_Re`C` z<%`mx8=hZrRS2$MPS+I(1ELVf^*^;}U51lwR*>)t(Qo4Ts%6=jc1v5SlyQ*hq6j&< z&x8(3X%8>(%xVA~-X+S_)qC28Ib#Z6*m1@TV4;uStfz!4X-0H6ExaSt7}A%w1Zt?t&Idal)10W>YDZK8p)5W*u2 zFes$Bazzdg7ruNoHD97OIZG&orKig0>xRF}$e&c}9|UaQ{f3iY|i?2RPP(-=l2(!Lp#90zHaE87&$4~*c1q4*!1Bu*t4|Y8^{xm(Y z>@D#Kb1qH8w>t;kLhRf88W!K6P2ZcrAD|a*HihoM$w{F0Ca37Z-AxRMqsDU%bM9`u z^8lMdq-Lat6>seS7Zea@p4DI0D_ijKEmPWFJHKl9^>x3!1~t;yHUhgcv1+1XeBEL@ zot-X;y7Rm}3Mm{!$;3_^s(X-dya@tBm7j(zc`8Hj#+(ynF>Y40;wmbl62XElt(CJE z9z1_kY_8MNLR(aYo;)dSVKKNDOogYwRz+RJQ%;Ru_#pD^bn)#WD~?gvsnQYpDvWSH zihsm$VZdJz`g-wmc4EL^5c)dt9e>?yyBXu5bKQhO=Vje|@5%kVVsyfoer|8l8Y7=~E?%T9 zR@QxP9_@@*Fj{TIw(OEc{j^eHi%_*;RHO4OznSC9VFNn?EcB}y2YeDP1BDft6`K{E z^%o{i9C#RfAbBT^=ij@4aqvUPR7h$ldIDukZQxSM7D0Ijdy#($I}v}1dXxP<_XUZ~ zMQ5zvn3*)u_-NjKKO~z=RmxTN#WvMt@1y5p*F=7k`6_<=9Y`2B8~A~fBBzq+N+rlpH+L46(|$A z3=yHT&`7ZgR<-=JMp^HBTi3_2EwJg30i3FuvH{kX)~5i?mu8`>4z3y5CdaEHuIV}^ z%d0Z3nVTlht3pp{d?wSYQcoG3CfBQCPw74;+pBU*hL=xT1H`xDrldRxI8;$d#B9V< zu2T+EE>ljjF0xLtZc{y+iT6lmT*I8h+`|UA)8N$<_C$Na$E3%`$EaojPH9dpPVr7b zPK8cMPK`>(*5}$6+I!k(+DF<~+Pm5k!qM1eRB56X<>%%yPIv{UKfTvK9Xl^gH^i#j zpiN;8I2WFD$S!QHPGm!{2v@pN=1j)Cu7D|9D|4{SF2c;U!kY6o`>PaU(SlA)=P1f~ zo_#0_NW8AJSLLqATAac*qf^*!%3B&|cWf?#Z_pkmGSphNAHQ#Fimvsp`LroSbH~#! 
zsGK?fy}eId6KEZU=7nc%R5fsph+|eHF2F6oCBP#i+c3ZPvDe6LBg<1SGG%D?-)6`r zD_t&dGH^0*GjK8R)Ns~t*KpPF*m2tZ+}A!IMJz!9T8AJS;Oz~lS zU#ON1Hn^6NHprGZ#Fn2>SW%p-DQA+l87V8YlXhE|Mmjv(`Ko(}s>c!o+gaN7WR=T| z)zD^VUx(6IRTea3*X0U4gZEYJSVX2J*E81y`XiniRE5tH2I2zccwu{;zq@aA4USu2 zjLhxT+_?Hz=;=N=o>#30?Wx1!oO5ejFsI9=9_bd_eFMYFft6%O4iqg>!ZfQ0)K-Lv z^JM!jVDgQTp9X#rl76h@ikCvVl0ElVqI*1X9l9S&COz@R5c)(@7=>B2T;?uyaX)nL zhWec$K!2K4N}uBl8r#DSJ8GvvP&g)RKcm7Kl@c&!IZ)E&N@Xc=MbC2uvT)ICaQQ$K z3Df}zxi<3&zM-6BPON72w`L8$YWD<;3nZFu`;kS$W6&jf1)KUzkz=L G)cz05(PHWV literal 0 HcmV?d00001 diff --git a/search/grounded-generation-playground/src/app/globals.css b/search/grounded-generation-playground/src/app/globals.css new file mode 100644 index 0000000000..d94d9bd52f --- /dev/null +++ b/search/grounded-generation-playground/src/app/globals.css @@ -0,0 +1,152 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +@tailwind base; +@tailwind components; +@tailwind utilities; + +:root { + --background: #fff; + --foreground: #171717; +} + +@media (prefers-color-scheme: dark) { + :root { + --background: #0a0a0a; + --foreground: #ededed; + } +} + +body { + color: var(--foreground); + background: var(--background); + font-family: Arial, Helvetica, sans-serif; +} + +@layer utilities { + .text-balance { + text-wrap: balance; + } +} + +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 240 10% 3.9%; + --card: 0 0% 100%; + --card-foreground: 240 10% 3.9%; + --card-user-bg: 0 0% 94%; /* Very light gray */ + --card-user-text: 0 0% 13%; /* Very dark gray */ + --card-system-bg: 195 6% 86%; /* Light grayish blue */ + --card-system-text: 214 8% 27%; /* Dark grayish blue */ + --popover: 0 0% 100%; + --popover-foreground: 240 10% 3.9%; + --primary: 240 5.9% 10%; + --primary-foreground: 0 0% 98%; + --secondary: 240 4.8% 95.9%; + --secondary-foreground: 240 5.9% 10%; + --muted: 240 4.8% 95.9%; + --muted-foreground: 240 3.8% 46.1%; + --accent: 240 4.8% 95.9%; + --accent-foreground: 240 5.9% 10%; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --border: 240 5.9% 90%; + --border-strong: 240 5.9% 79%; + --input: 240 5.9% 90%; + --ring: 240 10% 3.9%; + --chart-1: 12 76% 61%; + --chart-2: 173 58% 39%; + --chart-3: 197 37% 24%; + --chart-4: 43 74% 66%; + --chart-5: 27 87% 67%; + --radius: 0.5rem; + } + + .dark { + --background: 240 10% 3.9%; + --foreground: 0 0% 98%; + --card: 240 10% 3.9%; + --card-foreground: 0 0% 98%; + --card-user-bg: 0 0% 18%; /* Very dark gray */ + --card-user-text: 0 0% 80%; /* Light gray */ + --card-system-bg: 214 8% 27%; /* Dark grayish blue */ + --card-system-text: 195 6% 86%; /* Light grayish blue */ + --popover: 240 10% 3.9%; + --popover-foreground: 0 0% 98%; + --primary: 0 0% 98%; + --primary-foreground: 240 5.9% 10%; + --secondary: 240 3.7% 15.9%; + --secondary-foreground: 0 0% 98%; + --muted: 240 3.7% 15.9%; + 
--muted-foreground: 240 5% 64.9%; + --accent: 240 3.7% 15.9%; + --accent-foreground: 0 0% 98%; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 0% 98%; + --border: 240 3.7% 15.9%; + --border-strong: 240 3.7% 35.9%; + --input: 240 3.7% 15.9%; + --ring: 240 4.9% 83.9%; + --chart-1: 220 70% 50%; + --chart-2: 160 60% 45%; + --chart-3: 30 80% 55%; + --chart-4: 280 65% 60%; + --chart-5: 340 75% 55%; + } + + * { + @apply border-border; + } + + body { + @apply bg-background text-foreground; + } +} + +@layer components { + .card-user { + @apply bg-[hsl(var(--card-user-bg))] text-[hsl(var(--card-user-text))] border-primary !important; + } + + .card-system { + @apply bg-[hsl(var(--card-system-bg))] text-[hsl(var(--card-system-text))] border-secondary !important; + } +} + +@layer utilities { + .animate-accordion-down { + animation: accordion-down 0.2s ease-out; + } + + .animate-accordion-up { + animation: accordion-up 0.2s ease-out; + } +} + +.card { + --accordion-bg-color: var(--card); + --accordion-text-color: var(--card-foreground); + --accordion-border-color: var(--border); +} + +.on-hover-show-this { + visibility: hidden; +} + +.on-hover-show:hover .on-hover-show-this { + visibility: visible; +} diff --git a/search/grounded-generation-playground/src/app/layout.tsx b/search/grounded-generation-playground/src/app/layout.tsx new file mode 100644 index 0000000000..0eba68ed2e --- /dev/null +++ b/search/grounded-generation-playground/src/app/layout.tsx @@ -0,0 +1,34 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import './globals.css'; +import type { Metadata } from 'next'; +import { Inter } from 'next/font/google'; + +const inter = Inter({ subsets: ['latin'] }); + +export const metadata: Metadata = { + title: 'Vertex Grounded Generation Playground', + description: 'Explore grounded generation with Vertex AI', +}; + +export default function RootLayout({ children }: { children: React.ReactNode }) { + return ( + + {children} + + ); +} diff --git a/search/grounded-generation-playground/src/app/page.tsx b/search/grounded-generation-playground/src/app/page.tsx new file mode 100644 index 0000000000..3d98d469d1 --- /dev/null +++ b/search/grounded-generation-playground/src/app/page.tsx @@ -0,0 +1,709 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import React, { useState, useRef, useEffect } from 'react'; +import { usePathname } from 'next/navigation'; +import { Send, Trash2, Search } from 'lucide-react'; +import { Button } from '@/components/ui/button'; +import { Input } from '@/components/ui/input'; +import Icon, { IconSpinner, IconGemini } from '@/components/ui/icons'; +import PageSidebar from '@/components/ui/page-sidebar'; +import AboutPageContent from '@/components/ui/about-page-content'; +import { Card, CardContent } from '@/components/ui/card'; +import { ScrollArea } from '@/components/ui/scroll-area'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; +import { cn } from '@/lib/utils'; +import GroundedTextBlock from '@/components/ui/grounded-text-block'; +import { makeExampleQuestions } from '@/lib/grounding_option_utils'; +import ExampleQuestionGreeting from '@/components/ui/example-question-greeting'; + +interface Message { + role: 'user' | 'model'; + content: string; + searchEntryPoint?: string; + groundingSupport?: GroundingSupport[]; + supportChunks?: SupportChunk[]; +} + +interface ResponseData { + text: string; + groundingSupport?: GroundingSupport[]; + supportChunks?: SupportChunk[]; + searchEntryPoint?: string; +} +interface GroundingSupport { + claimText: string; + supportChunkIndices: number[]; +} + +interface SupportChunk { + index: number; + chunkText: string; + source: string; + sourceMetadata: { + title: string; + page_identifier: string; + uri: string; + document_id: string; + }; +} + +interface ExampleQuestion { + text: string; + icon?: string; +} + +export default function AppPage() { + const [sidebarOpen, setSidebarOpen] = useState(false); + const [exampleQuestions, setExampleQuestions] = + useState(makeExampleQuestions()); + const [googleGrounding, setGoogleGrounding] = useState(true); + const [vertexGrounding, setVertexGrounding] = 
useState(false); + const [groundingOptions, setGroundingOptionsState] = useState([]); + const [vertexConfigId, setVertexConfigId] = useState( + 'projects/503991587623/locations/global/collections/default_collection/engines/test-gg_1724941548160/servingConfigs/default_search', + ); + const [messages, setMessages] = useState([]); + const [inputMessage, setInputMessage] = useState(''); + const [isStreaming, setIsStreaming] = useState(false); + const [selectedModel, setSelectedModel] = useState('gemini-1.5-flash-001'); + const [temperature, setTemperature] = useState(0.2); + const [retrievalThreshold, setRetrievalThreshold] = useState(0.5); + const [activeTab, setActiveTab] = useState('chat'); + const [activeAboutTab, setActiveAboutTab] = useState('javascript'); + const [responses, setResponses] = useState<{ + grounded: ResponseData; + ungrounded: ResponseData; + }>({ + grounded: { + text: '', + groundingSupport: [], + supportChunks: [], + searchEntryPoint: '', + }, + ungrounded: { text: '', groundingSupport: [], supportChunks: [] }, + }); + const [showResponses, setShowResponses] = useState(false); + + const messagesEndRef = useRef(null); + const pathname = usePathname(); + + const toggleSidebar = () => setSidebarOpen(!sidebarOpen); + + useEffect(() => { + if (messagesEndRef.current) { + messagesEndRef.current.scrollIntoView({ behavior: 'smooth' }); + } + }, [messages]); + + const handleMessageFormSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + if (!inputMessage.trim() || isStreaming) return; + sendMessage(inputMessage); + }; + + const sendMessage = async (inputMessage: string) => { + const newUserMessage: Message = { role: 'user', content: inputMessage }; + setMessages((prevMessages) => [...prevMessages, newUserMessage]); + setInputMessage(''); + setIsStreaming(true); + + try { + const response = await fetch('/api/chat', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + messages: [...messages, 
newUserMessage], + model: selectedModel, + groundingOptions, + googleGrounding, + vertexGrounding, + vertexConfigId: vertexGrounding ? vertexConfigId : undefined, + temperature, + retrievalThreshold, + }), + }); + + if (!response.body) { + throw new Error('Response body is null'); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let accumulatedResponse = ''; + let currentMessage: Message = { role: 'model', content: '' }; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value, { stream: true }); + accumulatedResponse += chunk; + + let jsonChunk; + while (accumulatedResponse.includes('\n')) { + const newlineIndex = accumulatedResponse.indexOf('\n'); + const jsonString = accumulatedResponse.slice(0, newlineIndex); + accumulatedResponse = accumulatedResponse.slice(newlineIndex + 1); + + try { + jsonChunk = JSON.parse(jsonString); + if (jsonChunk.text) { + currentMessage.content += jsonChunk.text; + } + if (jsonChunk.searchEntryPoint) { + currentMessage.searchEntryPoint = jsonChunk.searchEntryPoint; + } + if (jsonChunk.groundingSupport) { + currentMessage.groundingSupport = jsonChunk.groundingSupport; + } + if (jsonChunk.supportChunks) { + currentMessage.supportChunks = jsonChunk.supportChunks; + } + setMessages((prevMessages) => { + const lastMessage = prevMessages[prevMessages.length - 1]; + if (lastMessage.role === 'model') { + return [...prevMessages.slice(0, -1), { ...currentMessage }]; + } else { + return [...prevMessages, { ...currentMessage }]; + } + }); + } catch (error) { + console.error('Error parsing JSON:', error); + } + } + } + + // Handle any remaining response + if (accumulatedResponse) { + try { + const jsonChunk = JSON.parse(accumulatedResponse); + if (jsonChunk.text) { + currentMessage.content += jsonChunk.text; + } + if (jsonChunk.searchEntryPoint) { + currentMessage.searchEntryPoint = jsonChunk.searchEntryPoint; + } + if 
(jsonChunk.groundingSupport) { + currentMessage.groundingSupport = jsonChunk.groundingSupport; + } + if (jsonChunk.supportChunks) { + currentMessage.supportChunks = jsonChunk.supportChunks; + } + setMessages((prevMessages) => { + const lastMessage = prevMessages[prevMessages.length - 1]; + if (lastMessage.role === 'model') { + return [...prevMessages.slice(0, -1), { ...currentMessage }]; + } else { + return [...prevMessages, { ...currentMessage }]; + } + }); + } catch (error) { + console.error('Error parsing JSON:', error); + } + } + } catch (error) { + console.error('Error in chat request:', error); + } finally { + console.log('setting isStreaming false'); + setIsStreaming(false); + } + }; + + const clearChat = () => { + setMessages([]); + }; + + const handleSearch = async (e: React.FormEvent) => { + e.preventDefault(); + if (!inputMessage.trim()) return; + setIsStreaming(true); + setResponses({ + grounded: { + text: '', + groundingSupport: [], + supportChunks: [], + searchEntryPoint: '', + }, + ungrounded: { text: '', groundingSupport: [], supportChunks: [] }, + }); + setShowResponses(true); + + const fetchGroundedStream = async () => { + const response = await fetch('/api/grounded', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + query: inputMessage, + model: selectedModel, + groundingOptions, + googleGrounding, + vertexGrounding, + vertexConfigId: vertexGrounding ? 
vertexConfigId : undefined, + retrievalThreshold, + }), + }); + + if (!response.body) { + console.error('Response body is null'); + return { + text: '', + groundingSupport: [], + supportChunks: [], + searchEntryPoint: '', + }; + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + let result: ResponseData = { + text: '', + groundingSupport: [], + supportChunks: [], + searchEntryPoint: '', + }; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + + let newlineIndex; + while ((newlineIndex = buffer.indexOf('\n')) !== -1) { + const jsonString = buffer.slice(0, newlineIndex); + buffer = buffer.slice(newlineIndex + 1); + + try { + const jsonObject = JSON.parse(jsonString); + if (jsonObject.text) { + result.text += jsonObject.text; + } + if (jsonObject.searchEntryPoint) { + result.searchEntryPoint = jsonObject.searchEntryPoint; + } + if (jsonObject.groundingSupport) { + result.groundingSupport = [ + ...(result.groundingSupport || []), + ...(jsonObject.groundingSupport as GroundingSupport[]), + ]; + } + if (jsonObject.supportChunks) { + result.supportChunks = [ + ...(result.supportChunks || []), + ...(jsonObject.supportChunks as SupportChunk[]), + ]; + } + } catch (error) { + console.error('Error parsing JSON:', error); + } + } + + setResponses((prev) => ({ + ...prev, + grounded: result, + })); + } + + // Process any remaining data in the buffer + if (buffer.trim()) { + try { + const jsonObject = JSON.parse(buffer); + if (jsonObject.text) { + result.text += jsonObject.text; + } + if (jsonObject.searchEntryPoint) { + result.searchEntryPoint = jsonObject.searchEntryPoint; + } + if (jsonObject.groundingSupport) { + result.groundingSupport = [ + ...(result.groundingSupport || []), + ...(jsonObject.groundingSupport as GroundingSupport[]), + ]; + } + if (jsonObject.supportChunks) { + result.supportChunks = [ + ...(result.supportChunks || 
[]), + ...(jsonObject.supportChunks as SupportChunk[]), + ]; + } + } catch (error) { + console.error('Error parsing JSON in remaining buffer:', error); + } + } + + return result; + }; + + const fetchUngroundedStream = async () => { + const response = await fetch('/api/ungrounded', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + query: inputMessage, + model: selectedModel, + }), + }); + + if (!response.body) { + console.error('Response body is null'); + return { text: '', groundingSupport: [], supportChunks: [] }; + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let result: ResponseData = { + text: '', + groundingSupport: [], + supportChunks: [], + }; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value, { stream: true }); + result.text += chunk; + + setResponses((prev) => ({ + ...prev, + ungrounded: result, + })); + } + + return result; + }; + + await Promise.all([fetchGroundedStream(), fetchUngroundedStream()]); + + setIsStreaming(false); + }; + + return ( + + ); +} + +export type { GroundingSupport, SupportChunk }; diff --git a/search/grounded-generation-playground/src/app/styles/page.tsx b/search/grounded-generation-playground/src/app/styles/page.tsx new file mode 100644 index 0000000000..ffd6569776 --- /dev/null +++ b/search/grounded-generation-playground/src/app/styles/page.tsx @@ -0,0 +1,198 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use client'; + +import React from 'react'; +import { Button } from '@/components/ui/button'; +import { Input } from '@/components/ui/input'; +import { Slider } from '@/components/ui/slider'; +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from '@/components/ui/card'; +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; +import { cn } from '@/lib/utils'; +import * as Accordion from '@radix-ui/react-accordion'; + +import { + AccordionItem, + AccordionTrigger, + AccordionContent, + AccordionDemo, +} from '@/components/ui/accordion'; + +// import { ChevronDownIcon } from "@radix-ui/react-icons"; + +export default function StylesPage() { + return ( +
    +
    +

    Style Showcase

    +
    +

    Colors

    +
    + {[ + { name: 'Background', class: 'bg-background' }, + { name: 'Foreground', class: 'bg-foreground text-background' }, + { name: 'Card', class: 'bg-card' }, + { + name: 'Card Foreground', + class: 'bg-card-foreground text-background', + }, + { name: 'Popover', class: 'bg-popover' }, + { + name: 'Popover Foreground', + class: 'bg-popover-foreground text-background', + }, + { name: 'Primary', class: 'bg-primary text-primary-foreground' }, + { + name: 'Secondary', + class: 'bg-secondary text-secondary-foreground', + }, + { name: 'Muted', class: 'bg-muted text-muted-foreground' }, + { name: 'Accent', class: 'bg-accent text-accent-foreground' }, + { + name: 'Destructive', + class: 'bg-destructive text-destructive-foreground', + }, + ].map((color) => ( +
    + {color.name} +
    + ))} +
    +
    +
    +

    Typography

    +
    +

    Heading 1

    +

    Heading 2

    +

    Heading 3

    +

    Regular paragraph text

    +

    Small text

    +

    Extra small text

    +
    +
    + +
    +

    Card Variants

    +
    + + + Default Card + This is the default card style + + Default card content + + + + + User Card + This card uses the user style + + User card content + + + + + System Card + This card uses the system style + + System card content + +
    +
    +
    +

    Accordions

    + +
    + + + Custom + + Basic 2nd accordion direct implementation. + + + + + + + Card with Accordion + + + User card content. + + More card content. + + + + + + System Card + This card uses the system style + + System card content + +
    + +
    +

    Components

    + +
    + + + Card Title + + This is a card component. + + + + + + + + + + + + + + + + Tab 1 + Tab 2 + + Content of Tab 1 + Content of Tab 2 + +
    +
    +
    +
    + ); +} diff --git a/search/grounded-generation-playground/src/components/ModelSelector.tsx b/search/grounded-generation-playground/src/components/ModelSelector.tsx new file mode 100644 index 0000000000..7427a540b2 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ModelSelector.tsx @@ -0,0 +1,51 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select'; + +interface ModelSelectorProps { + selectedModel: string; + setSelectedModel: (model: string) => void; +} + +const ModelSelector: React.FC = ({ + selectedModel, + setSelectedModel, +}) => { + return ( + + ); +}; + +export default ModelSelector; diff --git a/search/grounded-generation-playground/src/components/ui/about-page-content.tsx b/search/grounded-generation-playground/src/components/ui/about-page-content.tsx new file mode 100644 index 0000000000..b55ef62e6f --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/about-page-content.tsx @@ -0,0 +1,205 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import Icon from '@/components/ui/icons'; +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; +import { Card, CardContent } from '@/components/ui/card'; +import { + mapOptionsToGroundedGenerationRequest, + GroundedGenerationRequestBody, +} from '@/lib/apiutils'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; + +interface AboutPageContentProps { + selectedModel: string; + googleGrounding: boolean; + vertexGrounding: boolean; + vertexConfigId: string; + temperature: number; + activeAboutTab: string; + setActiveAboutTab: (tab: string) => void; +} + +// Use the GroundedGenerationRequestBody type directly +type RequestObject = GroundedGenerationRequestBody; + +const AboutPageContent: React.FC = ({ + selectedModel, + googleGrounding, + vertexGrounding, + vertexConfigId, + temperature, + activeAboutTab, + setActiveAboutTab, +}) => { + const requestObj: RequestObject = mapOptionsToGroundedGenerationRequest({ + systemInstruction: { parts: { text: 'Your system instruction here' } }, + contents: [{ role: 'user', parts: [{ text: 'Your query here' }] }], + model: selectedModel, + googleGrounding, + vertexGrounding, + vertexConfigId, + }); + + return ( + + + + JavaScript + + + Python + + + Curl Command + + + Notes + + + +
    +          {makeJs(requestObj)}
    +        
    +
    + +
    +          {makePython(requestObj)}
    +        
    +
    + +
    +          {makeCurl(requestObj)}
    +        
    +
    + +
    + {googleGrounding && ( + + +

    + + Google Search +

    + +

    Ground with Google Search.

    +
    +
    +

    Search results provided by Google Search.

    +

    + Dynamic Retrieval can be enabled along with a threshold to only do a + Google Search when we think it's needed. Fewer searches saves you cost + and speeds up the response. +

    +
    +
    +
    + )} + {vertexGrounding && ( + + +

    + + Vertex AI Search +

    +

    Enter your own Vertex AI Search project path.

    +

    + An example would look like:{' '} + + projects/555555555555/locations/global/collections/default_collection/engines/VERTEX_AI_SEARCH_APP_ID/servingConfigs/default_search + +

    +
    +
    + )} +
    +
    +
    + ); +}; + +const makeCurl = (requestObj: RequestObject): string => { + const PROJECT_NUMBER = process.env.PROJECT_NUMBER; + const API_ENDPOINT = `https://discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/global:streamGenerateGroundedContent`; + const requestStr = JSON.stringify(requestObj); + return `curl -X POST \\ +-H "Authorization: Bearer $(gcloud auth print-access-token)" \\ +-H "Content-Type: application/json" \\ +"${API_ENDPOINT}" \\ +-d '${requestStr}'`; +}; + +const makeJs = (requestObj: RequestObject): string => { + const PROJECT_NUMBER = process.env.PROJECT_NUMBER; + const API_ENDPOINT = `https://discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/global:streamGenerateGroundedContent`; + const requestStr = JSON.stringify(requestObj, null, 4); + return ` +const auth = new GoogleAuth({scopes: ['https://www.googleapis.com/auth/cloud-platform']}); +const client = await auth.getClient(); +const accessToken = await client.getAccessToken(); + +const response = await fetch('${API_ENDPOINT}', { + method: 'POST', + headers: { + Authorization: 'Bearer \${accessToken.token}', + 'Content-Type': 'application/json', + }, + body: JSON.parse(\`[ +${requestStr} +]\`), +});`; +}; + +// TODO: abhishekbhgwt@: Update to client library when available +const makePython = (requestObj: RequestObject): string => { + const PROJECT_NUMBER = process.env.PROJECT_NUMBER; + const API_ENDPOINT = `https://discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/global:streamGenerateGroundedContent`; + const requestStr = JSON.stringify(requestObj, null, 4); + return `import requests +import json +import google.auth +credentials, project = google.auth.default(scopes=['https://www.googleapis.com/auth/cloud-platform']) + + +API_ENDPOINT = "${API_ENDPOINT}" + +headers = { + "Authorization": f"Bearer {credentials.token}", + "Content-Type": "application/json" +} + +data = json.loads(f'''[ +${requestStr} +]''') + +response 
= requests.post(API_ENDPOINT, headers=headers, json=data)`; +}; + +export default AboutPageContent; diff --git a/search/grounded-generation-playground/src/components/ui/accordion.css b/search/grounded-generation-playground/src/components/ui/accordion.css new file mode 100644 index 0000000000..c507c9dd0f --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/accordion.css @@ -0,0 +1,85 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* accordion.css */ +@import url('/global.css'); + +:root { + --accordion-bg-color: hsl(var(--background)); + --accordion-text-color: hsl(var(--foreground)); + --accordion-border-color: hsl(var(--border)); + --accordion-hover-bg-color: hsl(var(--muted)); + --accordion-content-bg-color: hsl(var(--muted) / 50%); +} + +.dark { + --accordion-bg-color: hsl(var(--background)); + --accordion-text-color: hsl(var(--foreground)); + --accordion-border-color: hsl(var(--border)); + --accordion-hover-bg-color: hsl(var(--muted)); + --accordion-content-bg-color: hsl(var(--muted) / 30%); +} + +.accordion-root { + @apply w-full rounded-md border border-[--accordion-border-color] bg-[--accordion-bg-color] shadow-sm; +} + +.accordion-item { + @apply overflow-hidden first:rounded-t-md last:rounded-b-md focus-within:relative focus-within:z-10 focus-within:ring-2 focus-within:ring-ring focus-within:ring-offset-2; +} + +.accordion-trigger { + @apply flex w-full items-center justify-between bg-[--accordion-bg-color] px-4 py-4 font-medium transition-all hover:bg-[--accordion-hover-bg-color] [&[data-state=open]>svg]:rotate-180; +} + +.accordion-content { + @apply overflow-hidden text-sm bg-[--accordion-content-bg-color] transition-all data-[state=closed]:animate-accordion-up data-[state=open]:animate-accordion-down; +} + +.accordion-chevron { + @apply h-4 w-4 shrink-0 text-muted-foreground transition-transform duration-200; +} + +@keyframes accordion-down { + from { + height: 0; + } + + to { + height: var(--radix-accordion-content-height); + } +} + +@keyframes accordion-up { + from { + height: var(--radix-accordion-content-height); + } + + to { + height: 0; + } +} + +/* Add these classes to your global CSS file if not already present */ +@layer utilities { + .animate-accordion-down { + animation: accordion-down 0.2s ease-out; + } + + .animate-accordion-up { + animation: accordion-up 0.2s ease-out; + } +} diff --git 
a/search/grounded-generation-playground/src/components/ui/accordion.tsx b/search/grounded-generation-playground/src/components/ui/accordion.tsx new file mode 100644 index 0000000000..78f5f4410b --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/accordion.tsx @@ -0,0 +1,121 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// accordion.tsx +'use client'; + +import React from 'react'; +import * as AccordionPrimitive from '@radix-ui/react-accordion'; +import { ChevronDown } from 'lucide-react'; +import { cn } from '@/lib/utils'; + +const Accordion = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +Accordion.displayName = 'Accordion'; + +const AccordionItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +AccordionItem.displayName = 'AccordionItem'; + +const AccordionTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + svg]:rotate-180', + 'text-[--accordion-text-color]', + className, + )} + {...props} + > + {children} + + + +)); +AccordionTrigger.displayName = 'AccordionTrigger'; + +const AccordionContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + +
    {children}
    +
    +)); +AccordionContent.displayName = 'AccordionContent'; + +const AccordionDemo = () => ( + + + Is it accessible? + Yes. It adheres to the WAI-ARIA design pattern. + + + + Is it unstyled? + + Yes. It's unstyled by default, giving you freedom over the look and feel. + + + + + Can it be animated? + +
    + Yes! You can animate the Accordion with CSS or JavaScript. +
    +
    +
    +
    +); + +export { Accordion, AccordionItem, AccordionTrigger, AccordionContent, AccordionDemo }; diff --git a/search/grounded-generation-playground/src/components/ui/avatar.tsx b/search/grounded-generation-playground/src/components/ui/avatar.tsx new file mode 100644 index 0000000000..1715f42495 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/avatar.tsx @@ -0,0 +1,66 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import * as React from 'react'; +import * as AvatarPrimitive from '@radix-ui/react-avatar'; + +import { cn } from '@/lib/utils'; + +const Avatar = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +Avatar.displayName = AvatarPrimitive.Root.displayName; + +const AvatarImage = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +AvatarImage.displayName = AvatarPrimitive.Image.displayName; + +const AvatarFallback = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName; + +export { Avatar, AvatarImage, AvatarFallback }; diff --git a/search/grounded-generation-playground/src/components/ui/button.tsx b/search/grounded-generation-playground/src/components/ui/button.tsx new file mode 100644 index 0000000000..592e420d59 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/button.tsx @@ -0,0 +1,72 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import * as React from 'react'; +import { Slot } from '@radix-ui/react-slot'; +import { cva, type VariantProps } from 'class-variance-authority'; + +import { cn } from '@/lib/utils'; + +const buttonVariants = cva( + 'inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50', + { + variants: { + variant: { + default: 'bg-primary text-primary-foreground shadow hover:bg-primary/90', + destructive: + 'bg-destructive text-destructive-foreground shadow-sm hover:bg-destructive/90', + outline: + 'border border-input bg-background shadow-sm hover:bg-accent hover:text-accent-foreground', + secondary: + 'bg-secondary text-secondary-foreground shadow-sm hover:bg-secondary/80', + ghost: 'hover:bg-accent hover:text-accent-foreground', + link: 'text-primary underline-offset-4 hover:underline', + }, + size: { + default: 'h-9 px-4 py-2', + sm: 'h-8 rounded-md px-3 text-xs', + lg: 'h-10 rounded-md px-8', + icon: 'h-9 w-9', + }, + }, + defaultVariants: { + variant: 'default', + size: 'default', + }, + }, +); + +export interface ButtonProps + extends React.ButtonHTMLAttributes, + VariantProps { + asChild?: boolean; +} + +const Button = React.forwardRef( + ({ className, variant, size, asChild = false, ...props }, ref) => { + const Comp = asChild ? 
Slot : 'button'; + return ( + + ); + }, +); +Button.displayName = 'Button'; + +export { Button, buttonVariants }; diff --git a/search/grounded-generation-playground/src/components/ui/card.tsx b/search/grounded-generation-playground/src/components/ui/card.tsx new file mode 100644 index 0000000000..b4b46d4b63 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/card.tsx @@ -0,0 +1,94 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import * as React from 'react'; + +import { cn } from '@/lib/utils'; + +const Card = React.forwardRef>( + ({ className, ...props }, ref) => ( +
    + ), +); +Card.displayName = 'Card'; +// const Card = React.forwardRef>( +// ({ className, ...props }, ref) => ( +//
    +// ), +// ); +// Card.displayName = 'Card'; + +const CardHeader = React.forwardRef>( + ({ className, ...props }, ref) => ( +
    + ), +); +CardHeader.displayName = 'CardHeader'; + +const CardTitle = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +

    +)); +CardTitle.displayName = 'CardTitle'; + +const CardDescription = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +

    +)); +CardDescription.displayName = 'CardDescription'; + +const CardContent = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +

    +)); +CardContent.displayName = 'CardContent'; + +const CardFooter = React.forwardRef>( + ({ className, ...props }, ref) => ( +
    + ), +); +CardFooter.displayName = 'CardFooter'; + +export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }; diff --git a/search/grounded-generation-playground/src/components/ui/example-question-greeting.tsx b/search/grounded-generation-playground/src/components/ui/example-question-greeting.tsx new file mode 100644 index 0000000000..aaecd54138 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/example-question-greeting.tsx @@ -0,0 +1,74 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from '@/components/ui/tooltip'; +import { Card, CardContent } from '@/components/ui/card'; +import Icon from '@/components/ui/icons'; + +interface ExampleQuestion { + text: string; + icon?: string; // Make icon optional to match the other definition +} + +interface ExampleQuestionGreetingProps { + greeting: string; + exampleQuestions: ExampleQuestion[]; + onClick: (queryText: string) => void; +} + +const ExampleQuestionGreeting: React.FC = ({ + greeting, + exampleQuestions, + onClick, +}) => { + return ( +
    +

    {greeting}

    +
    + {exampleQuestions.map((query, index) => ( + + + + + onClick(query.text)} + > + {query.icon && ( + + )} +

    {query.text}

    +
    +
    +
    + +

    {query.text}

    +
    +
    +
    + ))} +
    +
    + ); +}; + +export default ExampleQuestionGreeting; diff --git a/search/grounded-generation-playground/src/components/ui/grounded-text-block.tsx b/search/grounded-generation-playground/src/components/ui/grounded-text-block.tsx new file mode 100644 index 0000000000..6708548f9d --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/grounded-text-block.tsx @@ -0,0 +1,128 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use client'; + +import React from 'react'; +import { Card, CardContent } from '@/components/ui/card'; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from '@/components/ui/tooltip'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; +import { transformGroundedContent } from '@/lib/grounded_content_citations'; +import { GroundingSupport, SupportChunk } from '@/app/page'; + +interface GroundedTextBlockProps { + role: 'user' | 'model'; + content: string; + groundingSupport?: GroundingSupport[]; + supportChunks?: SupportChunk[]; + searchEntryPoint?: string; + truncateAfter?: number; +} + +const GroundedTextBlock: React.FC = ({ + content, + groundingSupport, + supportChunks, + searchEntryPoint, + truncateAfter = 1500, +}) => { + if (!content) return null; + if ( + !groundingSupport || + !supportChunks || + groundingSupport.length === 0 || + supportChunks.length === 0 + ) { + return {content}; + } + + // Transform into 
citedContent and a simple list of citations. + const { citedContent, citations } = transformGroundedContent({ + content, + groundingSupport, + supportChunks, + truncateAfter, + }); + const isSourcesShown = + (searchEntryPoint && searchEntryPoint.length > 0) || + (citedContent && citations && citations.length > 0); + + return ( +
    + {citedContent} + {isSourcesShown && ( +

    Sources:

    + )} + {searchEntryPoint && searchEntryPoint.length > 0 && ( +
    + ); +}; + +const niceUri = (uri: string) => { + if (!uri) return ''; + return uri; +}; + +export default GroundedTextBlock; diff --git a/search/grounded-generation-playground/src/components/ui/grounding-option.tsx b/search/grounded-generation-playground/src/components/ui/grounding-option.tsx new file mode 100644 index 0000000000..954ca84b5d --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/grounding-option.tsx @@ -0,0 +1,76 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { Switch } from '@/components/ui/switch'; +import { Label } from '@/components/ui/label'; +import Icon from '@/components/ui/icons'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; + +import { grounding_options } from '@/lib/grounding_options'; + +interface GroundingOptionProps { + // this option our of all of them. 
+ groundingKey: string; + groundingOptions: string[]; + setGroundingOptions: (model: string[]) => void; +} + +const GroundingOption: React.FC = ({ + groundingKey, + groundingOptions, + setGroundingOptions, +}) => { + if (!groundingOptions) return null; + const checked = groundingOptions.includes(groundingKey); + const setThisGroundingOption = () => { + if (checked) { + setGroundingOptions( + groundingOptions.filter((option: string) => option !== groundingKey), + ); + } else { + setGroundingOptions([...groundingOptions, groundingKey]); + } + }; + const config = grounding_options[groundingKey]; + const vertexConfigId = ''; + return ( +
    +
    + + +
    + {checked && ( +
    +
    {config.retriever}
    +
    + {config.subtext} +
    +
    + )} +
    + ); +}; + +export default GroundingOption; diff --git a/search/grounded-generation-playground/src/components/ui/icons.tsx b/search/grounded-generation-playground/src/components/ui/icons.tsx new file mode 100644 index 0000000000..2091fefb3e --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/icons.tsx @@ -0,0 +1,252 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import { cn } from '@/lib/utils'; + +import { + ChartLine, + CircleDollarSign, + Code, + Cpu, + DollarSign, + Eclipse, + Film, + Globe, + Lock, + Microscope, + PackageSearch, + Search, + Send, + ShoppingCart, + Trash2, + User, + Youtube, +} from 'lucide-react'; + +const IconGemini = ({ className, ...props }: React.ComponentProps<'svg'>) => { + return ( + + + + + + + + + + + + + ); +}; + +const IconVertexAi = ({ className, ...props }: React.ComponentProps<'svg'>) => { + return ( + + + + + + + + + + + + + + + + + + + + + + + + + + + ); +}; +const IconElastic = ({ className, ...props }: React.ComponentProps<'svg'>) => { + return ( + + + + + + + + + + ); +}; + +const IconSpinner = ({ className, ...props }: React.ComponentProps<'svg'>) => { + return ( + + + + ); +}; + +const IconGoogle = ({ className, ...props }: React.ComponentProps<'svg'>) => { + return ( + + + + ); +}; + +interface DynamicIconProps { + className?: string; + type: string; + props?: React.ComponentProps<'svg'>; +} +const DynamicIcon: 
React.FC = ({ type, className, ...props }) => { + const iconMap = { + google: IconGoogle, + model: IconGemini, + gemini: IconGemini, + vertex: IconVertexAi, + elastic: IconElastic, + chartLine: ChartLine, + cart: ShoppingCart, + code: Code, + cpu: Cpu, + dollar: DollarSign, + dollarCircle: CircleDollarSign, + eclipse: Eclipse, + film: Film, + globe: Globe, + lock: Lock, + microscope: Microscope, + packageSearch: PackageSearch, + search: Search, + send: Send, + trash: Trash2, + user: User, + web: Globe, + youtube: Youtube, + }; + if (type in iconMap) { + const IconComponent = iconMap[type as keyof typeof iconMap]; + return ( + + ); + } + return ⚠️ Bad Icon {type}; +}; +DynamicIcon.displayName = 'Icon'; + +export { DynamicIcon, IconGemini, IconSpinner, IconVertexAi, IconElastic }; +export default DynamicIcon; diff --git a/search/grounded-generation-playground/src/components/ui/input.tsx b/search/grounded-generation-playground/src/components/ui/input.tsx new file mode 100644 index 0000000000..f62c52876d --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/input.tsx @@ -0,0 +1,40 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import * as React from 'react'; + +import { cn } from '@/lib/utils'; + +export interface InputProps extends React.InputHTMLAttributes {} + +const Input = React.forwardRef( + ({ className, type, ...props }, ref) => { + return ( + + ); + }, +); +Input.displayName = 'Input'; + +export { Input }; diff --git a/search/grounded-generation-playground/src/components/ui/label.tsx b/search/grounded-generation-playground/src/components/ui/label.tsx new file mode 100644 index 0000000000..c4e7810629 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/label.tsx @@ -0,0 +1,38 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import * as React from 'react'; +import * as LabelPrimitive from '@radix-ui/react-label'; +import { cva, type VariantProps } from 'class-variance-authority'; + +import { cn } from '@/lib/utils'; + +const labelVariants = cva( + 'text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70', +); + +const Label = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef & + VariantProps +>(({ className, ...props }, ref) => ( + +)); +Label.displayName = LabelPrimitive.Root.displayName; + +export { Label }; diff --git a/search/grounded-generation-playground/src/components/ui/page-header.tsx b/search/grounded-generation-playground/src/components/ui/page-header.tsx new file mode 100644 index 0000000000..5ac3c04eb3 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/page-header.tsx @@ -0,0 +1,53 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { Button } from '@/components/ui/button'; +import { Menu } from 'lucide-react'; + +interface PageHeaderProps { + toggleSidebar: () => void; +} + +const PageHeader: React.FC = ({ toggleSidebar }) => { + return ( +
    +
    +
    +

    + Vertex AI Search Grounded Generation Playground +

    + +
    +
    +
    + ); +}; + +export default PageHeader; diff --git a/search/grounded-generation-playground/src/components/ui/page-nav-tabs.tsx b/search/grounded-generation-playground/src/components/ui/page-nav-tabs.tsx new file mode 100644 index 0000000000..df89f926e4 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/page-nav-tabs.tsx @@ -0,0 +1,54 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Link from 'next/link'; + +interface PageNavTabsProps { + pathname: string; +} + +const PageNavTabs: React.FC = ({ pathname }) => { + return ( +
    +
    + +
    +
    + ); +}; + +export default PageNavTabs; diff --git a/search/grounded-generation-playground/src/components/ui/page-sidebar.tsx b/search/grounded-generation-playground/src/components/ui/page-sidebar.tsx new file mode 100644 index 0000000000..7c643c3f89 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/page-sidebar.tsx @@ -0,0 +1,147 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { Button } from '@/components/ui/button'; +import { Input } from '@/components/ui/input'; +import { Switch } from '@/components/ui/switch'; +import { Label } from '@/components/ui/label'; +import ModelSelector from '@/components/ModelSelector'; +import { Slider } from '@/components/ui/slider'; +import { X } from 'lucide-react'; +import Icon from '@/components/ui/icons'; + +interface PageSidebarProps { + toggleSidebar: () => void; + sidebarOpen: boolean; + selectedModel: string; + setSelectedModel: (model: string) => void; + googleGrounding: boolean; + setGoogleGrounding: (enabled: boolean) => void; + retrievalThreshold: number; + setRetrievalThreshold: (threshold: number) => void; + vertexGrounding: boolean; + setVertexGrounding: (enabled: boolean) => void; + vertexConfigId: string; + setVertexConfigId: (configId: string) => void; +} + +const PageSidebar: React.FC = ({ + toggleSidebar, + sidebarOpen, + selectedModel, + setSelectedModel, + googleGrounding, + 
setGoogleGrounding, + retrievalThreshold, + setRetrievalThreshold, + vertexGrounding, + setVertexGrounding, + vertexConfigId, + setVertexConfigId, +}) => { + return ( +
    +
    +

    Configure Grounding

    + +
    +
    +
    + + +
    +
    +
    + + +
    + {googleGrounding && ( +
    + + setRetrievalThreshold(value[0])} + className="w-full" + /> +
    {retrievalThreshold.toFixed(2)}
    +
    + )} +
    +
    +
    + + +
    + {vertexGrounding && ( +
    + + ) => + setVertexConfigId(e.target.value) + } + className="bg-zinc-800 text-white border-zinc-700" + placeholder="Enter your Vertex AI Search Serving Config" + /> +
    + )} +
    +
    +
    + ); +}; + +export default PageSidebar; diff --git a/search/grounded-generation-playground/src/components/ui/scroll-area.tsx b/search/grounded-generation-playground/src/components/ui/scroll-area.tsx new file mode 100644 index 0000000000..c3774a4dc6 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/scroll-area.tsx @@ -0,0 +1,63 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use client'; + +import * as React from 'react'; +import * as ScrollAreaPrimitive from '@radix-ui/react-scroll-area'; + +import { cn } from '@/lib/utils'; + +const ScrollArea = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + + {children} + + + + +)); +ScrollArea.displayName = ScrollAreaPrimitive.Root.displayName; + +const ScrollBar = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, orientation = 'vertical', ...props }, ref) => ( + + + +)); +ScrollBar.displayName = ScrollAreaPrimitive.ScrollAreaScrollbar.displayName; + +export { ScrollArea, ScrollBar }; diff --git a/search/grounded-generation-playground/src/components/ui/select.tsx b/search/grounded-generation-playground/src/components/ui/select.tsx new file mode 100644 index 0000000000..e9c4f617f8 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/select.tsx @@ -0,0 +1,173 @@ +/** + * Copyright 2024 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use client'; + +import * as React from 'react'; +import { + CaretSortIcon, + CheckIcon, + ChevronDownIcon, + ChevronUpIcon, +} from '@radix-ui/react-icons'; +import * as SelectPrimitive from '@radix-ui/react-select'; + +import { cn } from '@/lib/utils'; + +const Select = SelectPrimitive.Root; + +const SelectGroup = SelectPrimitive.Group; + +const SelectValue = SelectPrimitive.Value; + +const SelectTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + span]:line-clamp-1', + className, + )} + {...props} + > + {children} + + + + +)); +SelectTrigger.displayName = SelectPrimitive.Trigger.displayName; + +const SelectScrollUpButton = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)); +SelectScrollUpButton.displayName = SelectPrimitive.ScrollUpButton.displayName; + +const SelectScrollDownButton = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)); +SelectScrollDownButton.displayName = SelectPrimitive.ScrollDownButton.displayName; + +const SelectContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, position = 'popper', ...props }, ref) => ( + + + + + {children} + + + + +)); +SelectContent.displayName = SelectPrimitive.Content.displayName; + +const 
SelectLabel = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +SelectLabel.displayName = SelectPrimitive.Label.displayName; + +const SelectItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + + + + + + {children} + +)); +SelectItem.displayName = SelectPrimitive.Item.displayName; + +const SelectSeparator = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +SelectSeparator.displayName = SelectPrimitive.Separator.displayName; + +export { + Select, + SelectGroup, + SelectValue, + SelectTrigger, + SelectContent, + SelectLabel, + SelectItem, + SelectSeparator, + SelectScrollUpButton, + SelectScrollDownButton, +}; diff --git a/search/grounded-generation-playground/src/components/ui/slider.tsx b/search/grounded-generation-playground/src/components/ui/slider.tsx new file mode 100644 index 0000000000..a33c05d961 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/slider.tsx @@ -0,0 +1,41 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import * as React from 'react'; +import * as SliderPrimitive from '@radix-ui/react-slider'; + +import { cn } from '@/lib/utils'; + +const Slider = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + + + + +)); +Slider.displayName = SliderPrimitive.Root.displayName; + +export { Slider }; diff --git a/search/grounded-generation-playground/src/components/ui/switch.tsx b/search/grounded-generation-playground/src/components/ui/switch.tsx new file mode 100644 index 0000000000..58625ff423 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/switch.tsx @@ -0,0 +1,45 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import * as React from 'react'; +import * as SwitchPrimitives from '@radix-ui/react-switch'; + +import { cn } from '@/lib/utils'; + +const Switch = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)); +Switch.displayName = SwitchPrimitives.Root.displayName; + +export { Switch }; diff --git a/search/grounded-generation-playground/src/components/ui/tabs.tsx b/search/grounded-generation-playground/src/components/ui/tabs.tsx new file mode 100644 index 0000000000..750fe14370 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/tabs.tsx @@ -0,0 +1,71 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import * as React from 'react'; +import * as TabsPrimitive from '@radix-ui/react-tabs'; + +import { cn } from '@/lib/utils'; + +const Tabs = TabsPrimitive.Root; + +const TabsList = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +TabsList.displayName = TabsPrimitive.List.displayName; + +const TabsTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +TabsTrigger.displayName = TabsPrimitive.Trigger.displayName; + +const TabsContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +TabsContent.displayName = TabsPrimitive.Content.displayName; + +export { Tabs, TabsList, TabsTrigger, TabsContent }; diff --git a/search/grounded-generation-playground/src/components/ui/tooltip.tsx b/search/grounded-generation-playground/src/components/ui/tooltip.tsx new file mode 100644 index 0000000000..922c2e19e6 --- /dev/null +++ b/search/grounded-generation-playground/src/components/ui/tooltip.tsx @@ -0,0 +1,46 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use client'; + +import * as React from 'react'; +import * as TooltipPrimitive from '@radix-ui/react-tooltip'; + +import { cn } from '@/lib/utils'; + +const TooltipProvider = TooltipPrimitive.Provider; + +const Tooltip = TooltipPrimitive.Root; + +const TooltipTrigger = TooltipPrimitive.Trigger; + +const TooltipContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, sideOffset = 4, ...props }, ref) => ( + +)); +TooltipContent.displayName = TooltipPrimitive.Content.displayName; + +export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }; diff --git a/search/grounded-generation-playground/src/lib/apiutils.ts b/search/grounded-generation-playground/src/lib/apiutils.ts new file mode 100644 index 0000000000..3f4cf6975d --- /dev/null +++ b/search/grounded-generation-playground/src/lib/apiutils.ts @@ -0,0 +1,223 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { makeGroundingSearchSource } from '@/lib/grounding_option_utils'; + +const responseCandidateToResult = (candidate: any) => { + let result: any = {}; + + if ( + candidate.content && + candidate.content.parts && + candidate.content.parts[0] && + candidate.content.parts[0].text + ) { + result.text = candidate.content.parts[0].text; + } + + if (candidate.groundingMetadata && candidate.groundingMetadata.searchEntryPoint) { + result.searchEntryPoint = + candidate.groundingMetadata.searchEntryPoint.renderedContent; + } + + if (candidate.groundingMetadata && candidate.groundingMetadata.groundingSupport) { + result.groundingSupport = candidate.groundingMetadata.groundingSupport; + } + + if (candidate.groundingMetadata && candidate.groundingMetadata.supportChunks) { + result.supportChunks = candidate.groundingMetadata.supportChunks; + } + return result; +}; + +const iteratorToStream = (iterator: AsyncIterator) => { + return new ReadableStream({ + async pull(controller) { + const { value, done } = await iterator.next(); + if (done) { + controller.close(); + } else { + controller.enqueue(value); + } + }, + }); +}; + +async function* processApiResponse(response: Response) { + const reader = response.body?.getReader(); + if (!reader) return; + + const decoder = new TextDecoder(); + let buffer = ''; + + while (true) { + const readerOutput = await reader.read(); + if (!readerOutput || readerOutput.done) break; + if (!readerOutput.value) break; + + buffer += decoder.decode(readerOutput.value, { stream: true }); + + let bracketCount = 0; + let jsonStartIndex = buffer.indexOf('{'); + + while (jsonStartIndex !== -1) { + let jsonEndIndex = jsonStartIndex; + + for (let i = jsonStartIndex; i < buffer.length; i++) { + if (buffer[i] === '{') bracketCount++; + if (buffer[i] === '}') bracketCount--; + + if (bracketCount === 0) { + jsonEndIndex = i + 1; + break; + } + } + + if (bracketCount === 0) { + const jsonString = buffer.slice(jsonStartIndex, jsonEndIndex); + buffer = 
buffer.slice(jsonEndIndex); + + try { + const jsonObject = JSON.parse(jsonString); + if (jsonObject.candidates && jsonObject.candidates[0]) { + const candidate = jsonObject.candidates[0]; + const result = responseCandidateToResult(candidate); + if (Object.keys(result).length > 0) { + yield JSON.stringify(result) + '\n'; + } + } + } catch (error) { + console.error('Error parsing JSON:', error); + } + + jsonStartIndex = buffer.indexOf('{'); + } else { + break; + } + } + } + + if (buffer.trim()) { + const trimmedBuffer = buffer.trim(); + if (trimmedBuffer !== ']' && trimmedBuffer !== '}') { + try { + const jsonObject = JSON.parse(trimmedBuffer); + if (jsonObject.candidates && jsonObject.candidates[0]) { + const candidate = jsonObject.candidates[0]; + const result = responseCandidateToResult(candidate); + if (Object.keys(result).length > 0) { + yield JSON.stringify(result) + '\n'; + } + } + } catch (error) { + console.error( + 'Error parsing JSON in remaining buffer:', + error, + 'Buffer:', + trimmedBuffer, + ); + } + } + } +} + +export interface OptionsGroundedGenerationContent { + role: string; + parts: { text: string }[]; +} + +export interface OptionsGroundedGenerationRequest { + systemInstruction: { + parts: { text: string }; + }; + contents: OptionsGroundedGenerationContent[]; + model?: string; + googleGrounding?: boolean; + vertexGrounding?: boolean; + vertexConfigId?: string; +} + +export interface GroundedGenerationRequestBody { + systemInstruction: { + parts: { text: string }; + }; + contents: OptionsGroundedGenerationContent[]; + groundingSpec: { + groundingSources: { + googleSearchSource?: {}; + searchSource?: { + servingConfig: string; + }; + }[]; + }; + generationSpec: { + modelId: string; + temperature: number; + topP: number; + topK: number; + }; +} + +const mapOptionsToGroundedGenerationRequest = ({ + systemInstruction, + contents, + model, + googleGrounding, + vertexGrounding, + vertexConfigId, +}: OptionsGroundedGenerationRequest) => { + const 
requestBody: GroundedGenerationRequestBody = { + systemInstruction, + contents, + groundingSpec: { + groundingSources: [], + }, + generationSpec: { + modelId: model || 'gemini-1.5-flash', + temperature: 0.9, + topP: 1, + topK: 1, + }, + }; + + if (googleGrounding) { + requestBody.groundingSpec.groundingSources.push({ + googleSearchSource: {}, + }); + } + + if (vertexGrounding && vertexConfigId) { + requestBody.groundingSpec.groundingSources.push({ + searchSource: { + servingConfig: vertexConfigId, + }, + }); + } + + if (requestBody.generationSpec.modelId === 'gemini-1.5-flash-high-fidelity') { + console.log('⚠️ swap model back to gemini-1.5-flash, until allowlisted ⚠️'); + requestBody.generationSpec.modelId = 'gemini-1.5-flash'; + } + + return requestBody; +}; + +export { + responseCandidateToResult, + iteratorToStream, + processApiResponse, + mapOptionsToGroundedGenerationRequest, +}; diff --git a/search/grounded-generation-playground/src/lib/grounded_content_citations.ts b/search/grounded-generation-playground/src/lib/grounded_content_citations.ts new file mode 100644 index 0000000000..4f1bbef37d --- /dev/null +++ b/search/grounded-generation-playground/src/lib/grounded_content_citations.ts @@ -0,0 +1,145 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { StringToBoolean } from 'class-variance-authority/types'; + +interface GroundedContent { + content: string; + groundingSupport?: GroundingSupport[]; + supportChunks?: SupportChunk[]; + searchEntryPoint?: string; + truncateAfter: number; +} + +interface GroundingSupport { + supportChunkIndices: number[]; + claimText: string; +} + +interface SupportChunkMetadata { + uri?: string; + domain?: string; + title?: string; + index?: number; + source?: string; +} + +interface SupportChunk { + index: number; + sourceMetadata?: SupportChunkMetadata; + chunkText?: string; +} + +interface TransformedCitation { + chunkIndex: number; + uri: string; + title: string; + text: string; + source: string; + index?: number; +} + +interface TransformedContent { + citedContent: string; + citations: TransformedCitation[]; +} + +/** + * Transforms the output from the Grounded Generation API into a format that allows for inline citations. + * + * This function takes the original content, grounding support information, support chunks, and search entry point + * as input. It processes the grounding support information to insert citations into the original content and + * prepares a list of citations with their corresponding metadata. + * + * @param {GroundedContent} groundedContent - The content and metadata returned from the Grounded Generation API. + * @returns {TransformedContent} - An object containing the content with citations inserted and a list of citations. + */ +export function transformGroundedContent({ + content, + groundingSupport, + supportChunks = [], + searchEntryPoint, + truncateAfter, +}: GroundedContent): TransformedContent { + let citedContent = content; + const usedSources = new Set(); + const citations: TransformedCitation[] = []; + + // Support chunks are provided by index, make a convenient lookup map. 
+ const supportChunksByIndex = new Map(); + supportChunks.forEach((supportChunk, i) => { + const index = supportChunk.index || i; + if ( + supportChunksByIndex.has(index) && + supportChunksByIndex.get(index) !== supportChunk + ) { + console.error( + `Clobbered supportChunk at ${index}, new ${JSON.stringify( + supportChunk, + )}, old ${JSON.stringify(supportChunksByIndex.get(index))}`, + ); + } + supportChunksByIndex.set(index, supportChunk); + }); + + if (groundingSupport) { + groundingSupport.forEach((support, supportIndex) => { + const citation = support.supportChunkIndices.map((i) => `[${i + 1}] `).join(''); + citedContent = citedContent.replace( + support.claimText, + `${support.claimText} ${citation}`, + ); + support.supportChunkIndices.forEach((chunkIndex) => { + const chunk = supportChunksByIndex.get(chunkIndex) || supportChunks[chunkIndex]; + if (!chunk) return; + + const sourceMetadata = chunk.sourceMetadata || {}; + const uri = sourceMetadata.uri || ''; + const title = sourceMetadata.title || sourceMetadata.domain || ''; + const index = sourceMetadata.index ?? chunkIndex; + let source = sourceMetadata.source; + const text = chunk.chunkText + ? 
chunk.chunkText.substring(0, truncateAfter || 1500) + : 'No text available'; + + if (uri.length === 0) return; + if (usedSources.has(uri)) return; + usedSources.add(uri); + + if ( + uri.substring(0, 63) == + 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/' + ) { + source = 'Google Search'; + } + + citations.push({ + chunkIndex, + uri, + title, + text, + source: source || '', + index, + }); + }); + }); + } + + return { + citedContent, + citations, + }; +} diff --git a/search/grounded-generation-playground/src/lib/grounding_option_utils.ts b/search/grounded-generation-playground/src/lib/grounding_option_utils.ts new file mode 100644 index 0000000000..b723dc90ea --- /dev/null +++ b/search/grounded-generation-playground/src/lib/grounding_option_utils.ts @@ -0,0 +1,57 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { grounding_options } from './grounding_options'; + +export interface ExampleQuestion { + text: string; + icon?: string; +} + +const makeVAISServingConfigPath = (config: { + project_id?: string; + app_id?: string; + location?: string; +}) => { + const project_id = config.project_id || 'ERR_MISSING_PROJECT_ID'; + const app_id = config.app_id || 'ERR_MISSING_APP_ID'; + const location = config.location || 'global'; + return `projects/${project_id}/locations/${location}/collections/default_collection/engines/${app_id}/servingConfigs/default_search`; +}; + +export const getConfigByKey = (groundingKey: keyof typeof grounding_options) => { + return grounding_options[groundingKey]; +}; + +export const makeGroundingSearchSource = (vertexConfigId: string) => { + return { servingConfig: vertexConfigId }; +}; + +const exampleQuestionsDefault: ExampleQuestion[] = [ + { text: 'When is the next total solar eclipse in US?', icon: 'eclipse' }, + { + text: "How many qubits does the world's largest quantum computer have?", + icon: 'cpu', + }, + { + text: 'Who was the CEO of YouTube when Google acquired it?', + icon: 'youtube', + }, +]; + +export const makeExampleQuestions = (): ExampleQuestion[] => { + return exampleQuestionsDefault.slice(0, 3); +}; diff --git a/search/grounded-generation-playground/src/lib/grounding_options.ts b/search/grounded-generation-playground/src/lib/grounding_options.ts new file mode 100644 index 0000000000..f2febd0aab --- /dev/null +++ b/search/grounded-generation-playground/src/lib/grounding_options.ts @@ -0,0 +1,39 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface ExampleQuestion { + text: string; + icon?: string; +} + +interface GroundingOption { + data: string; + retriever: string; + subtext: string; + description: string; + type: string; + project_id?: string; + app_id?: string; + data_store_id?: string; + servingConfig?: string; + icon: string; + hidden?: boolean; + exampleQuestions?: ExampleQuestion[]; +} + +const grounding_options: Record = {}; + +export { grounding_options }; diff --git a/search/grounded-generation-playground/src/lib/utils.ts b/search/grounded-generation-playground/src/lib/utils.ts new file mode 100644 index 0000000000..1feaf7e6b9 --- /dev/null +++ b/search/grounded-generation-playground/src/lib/utils.ts @@ -0,0 +1,22 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { type ClassValue, clsx } from 'clsx'; +import { twMerge } from 'tailwind-merge'; + +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} diff --git a/search/grounded-generation-playground/tailwind.config.js b/search/grounded-generation-playground/tailwind.config.js new file mode 100644 index 0000000000..e205c8d720 --- /dev/null +++ b/search/grounded-generation-playground/tailwind.config.js @@ -0,0 +1,92 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** @type {import('tailwindcss').Config} */ +module.exports = { + darkMode: ['class'], + content: [ + './pages/**/*.{ts,tsx}', + './components/**/*.{ts,tsx}', + './app/**/*.{ts,tsx}', + './src/**/*.{ts,tsx}', + ], + theme: { + container: { + center: true, + padding: '2rem', + screens: { + '2xl': '1400px', + }, + }, + extend: { + colors: { + border: 'hsl(var(--border))', + input: 'hsl(var(--input))', + ring: 'hsl(var(--ring))', + background: 'hsl(var(--background))', + foreground: 'hsl(var(--foreground))', + primary: { + DEFAULT: 'hsl(var(--primary))', + foreground: 'hsl(var(--primary-foreground))', + }, + secondary: { + DEFAULT: 'hsl(var(--secondary))', + foreground: 'hsl(var(--secondary-foreground))', + }, + destructive: { + DEFAULT: 'hsl(var(--destructive))', + foreground: 'hsl(var(--destructive-foreground))', + }, + muted: { + DEFAULT: 'hsl(var(--muted))', + foreground: 'hsl(var(--muted-foreground))', + }, + accent: { + DEFAULT: 'hsl(var(--accent))', + foreground: 'hsl(var(--accent-foreground))', + }, + popover: { + DEFAULT: 'hsl(var(--popover))', + foreground: 'hsl(var(--popover-foreground))', + }, + card: { + DEFAULT: 'hsl(var(--card))', + foreground: 'hsl(var(--card-foreground))', + }, + }, + borderRadius: { + lg: 'var(--radius)', + md: 'calc(var(--radius) - 2px)', + sm: 'calc(var(--radius) - 4px)', + }, + keyframes: { + 'accordion-down': { + from: { height: 0 }, + to: { height: 'var(--radix-accordion-content-height)' }, + }, + 'accordion-up': { + from: { height: 'var(--radix-accordion-content-height)' }, + to: { height: 0 }, + }, + }, + animation: { + 'accordion-down': 'accordion-down 0.2s ease-out', + 'accordion-up': 'accordion-up 0.2s ease-out', + }, + }, + }, + plugins: [require('tailwindcss-animate'), require('@tailwindcss/typography')], +}; diff --git a/search/grounded-generation-playground/tailwind.config.ts b/search/grounded-generation-playground/tailwind.config.ts new file mode 100644 index 0000000000..9cf51429cd --- /dev/null +++ 
b/search/grounded-generation-playground/tailwind.config.ts @@ -0,0 +1,35 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import type { Config } from 'tailwindcss'; + +const config: Config = { + content: [ + './src/pages/**/*.{js,ts,jsx,tsx,mdx}', + './src/components/**/*.{js,ts,jsx,tsx,mdx}', + './src/app/**/*.{js,ts,jsx,tsx,mdx}', + ], + theme: { + extend: { + colors: { + background: 'var(--background)', + foreground: 'var(--foreground)', + }, + }, + }, + plugins: [], +}; +export default config; diff --git a/search/grounded-generation-playground/tsconfig.json b/search/grounded-generation-playground/tsconfig.json new file mode 100644 index 0000000000..c33416d319 --- /dev/null +++ b/search/grounded-generation-playground/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve", + "incremental": true, + "plugins": [ + { + "name": "next" + } + ], + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "exclude": ["node_modules"] +} From 3548c74fbf15e1f3dc67373e23e2cde1551800dd Mon Sep 17 00:00:00 2001 From: eliasecchig 
<115624100+eliasecchig@users.noreply.github.com> Date: Fri, 11 Oct 2024 20:22:41 +0200 Subject: [PATCH 68/76] fix: add pylint to starter pack dependencies (#1245) Co-authored-by: Owl Bot --- .../e2e-gen-ai-app-starter-pack/poetry.lock | 103 ++++++++++++++++-- .../pyproject.toml | 6 +- 2 files changed, 96 insertions(+), 13 deletions(-) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock index 4e862fdc8b..919fdc9cfa 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/poetry.lock @@ -305,6 +305,20 @@ types-python-dateutil = ">=2.8.10" doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] +[[package]] +name = "astroid" +version = "3.3.5" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.9.0" +files = [ + {file = "astroid-3.3.5-py3-none-any.whl", hash = "sha256:a9d1c946ada25098d790e079ba2a1b112157278f3fb7e718ae6a9252f5835dc8"}, + {file = "astroid-3.3.5.tar.gz", hash = "sha256:5cfc40ae9f68311075d27ef68a4841bdc5cc7f6cf86671b49f00607d30188e2d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + [[package]] name = "asttokens" version = "2.4.1" @@ -950,6 +964,21 @@ wrapt = ">=1.10,<2" [package.extras] dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] +[[package]] +name = "dill" +version = "0.3.9" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + [[package]] name = "distro" version = "1.9.0" @@ -1391,12 +1420,12 @@ files = [ google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = [ - {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] grpcio-status = [ - {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] proto-plus = ">=1.22.3,<2.0.0dev" protobuf = 
">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" @@ -1629,8 +1658,8 @@ google-cloud-core = ">=2.0.0,<3.0.0dev" grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" opentelemetry-api = ">=1.9.0" proto-plus = [ - {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, + {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, ] protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" @@ -2293,6 +2322,20 @@ files = [ [package.dependencies] arrow = ">=0.15.0" +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + [[package]] name = "jedi" version = "0.19.1" @@ -2902,8 +2945,8 @@ langchain-core = ">=0.3.10,<0.4.0" langchain-text-splitters = ">=0.3.0,<0.4.0" langsmith = ">=0.1.17,<0.2.0" numpy = [ - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, {version = ">=1,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, ] pydantic = ">=2.7.4,<3.0.0" PyYAML = ">=5.3" @@ -2929,8 +2972,8 @@ langchain = ">=0.3.3,<0.4.0" langchain-core = ">=0.3.10,<0.4.0" langsmith = ">=0.1.125,<0.2.0" numpy = [ - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, {version = ">=1,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.0,<2.0.0", markers = 
"python_version >= \"3.12\""}, ] pydantic-settings = ">=2.4.0,<3.0.0" PyYAML = ">=5.3" @@ -2954,8 +2997,8 @@ jsonpatch = ">=1.33,<2.0" langsmith = ">=0.1.125,<0.2.0" packaging = ">=23.2,<25" pydantic = [ - {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""}, + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, ] PyYAML = ">=5.3" tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" @@ -3077,8 +3120,8 @@ files = [ httpx = ">=0.23.0,<1" orjson = ">=3.9.14,<4.0.0" pydantic = [ - {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, ] requests = ">=2,<3" requests-toolbelt = ">=1.0.0,<2.0.0" @@ -4766,9 +4809,9 @@ files = [ [package.dependencies] numpy = [ + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, {version = ">=1.23.2", markers = "python_version == \"3.11\""}, - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -5355,8 +5398,8 @@ files = [ annotated-types = ">=0.6.0" pydantic-core = "2.23.4" typing-extensions = [ - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, ] [package.extras] @@ -5528,6 +5571,35 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pylint" +version = "3.3.1" +description = "python code static checker" +optional = false +python-versions = ">=3.9.0" +files = [ + {file = "pylint-3.3.1-py3-none-any.whl", hash = "sha256:2f846a466dd023513240bc140ad2dd73bfc080a5d85a710afdb728c420a5a2b9"}, + {file = "pylint-3.3.1.tar.gz", hash = 
"sha256:9f3dcc87b1203e612b78d91a896407787e708b3f189b5fa0b307712d49ff0c6e"}, +] + +[package.dependencies] +astroid = ">=3.3.4,<=3.4.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + [[package]] name = "pymdown-extensions" version = "10.11.2" @@ -7052,6 +7124,17 @@ files = [ {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] +[[package]] +name = "tomlkit" +version = "0.13.2" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, +] + [[package]] name = "tornado" version = "6.4.1" @@ -7861,4 +7944,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "f3b44fd0947cbca385b2474cd6209435ffc35ff183eb0a05aa621d563860a151" +content-hash = "ba355f051c81d7f4efbcef2c4172b94963a9e33367ffcc0cdf6dc18a2ecb99cd" diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml index 4808cac87e..311e787894 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/pyproject.toml @@ -24,8 +24,6 @@ grpcio = "1.64.1" 
langgraph = "^0.2.21" uvicorn = {extras = ["standard"], version = "^0.30.5"} immutabledict = "^4.2.0" -types-pyyaml = "^6.0.12.20240917" -types-requests = "^2.32.0.20240914" langchain-core = "^0.3.9" @@ -61,7 +59,9 @@ flake8-pyproject = "^1.2.3" mypy = "^1" codespell = "^2.2.0" black = "^24.8.0" - +pylint = "^3.3.1" +types-pyyaml = "^6.0.12.20240917" +types-requests = "^2.32.0.20240914" [tool.mypy] disallow_untyped_calls = true From d18dd5ac2b04d182e82c5315f6e5a5078cd55877 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 11 Oct 2024 20:22:57 +0200 Subject: [PATCH 69/76] chore(deps): update dependency next to v14.2.10 [security] (#1249) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [next](https://nextjs.org) ([source](https://redirect.github.com/vercel/next.js)) | [`14.2.9` -> `14.2.10`](https://renovatebot.com/diffs/npm/next/14.2.9/14.2.10) | [![age](https://developer.mend.io/api/mc/badges/age/npm/next/14.2.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/next/14.2.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/next/14.2.9/14.2.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/next/14.2.9/14.2.10?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. 
### GitHub Vulnerability Alerts #### [CVE-2024-46982](https://redirect.github.com/vercel/next.js/security/advisories/GHSA-gp8f-8m3g-qvj9) ### Impact By sending a crafted HTTP request, it is possible to poison the cache of a non-dynamic server-side rendered route in the pages router (this does not affect the app router). When this crafted request is sent it could coerce Next.js to cache a route that is meant to not be cached and send a `Cache-Control: s-maxage=1, stale-while-revalidate` header which some upstream CDNs may cache as well. To be potentially affected all of the following must apply: - Next.js between 13.5.1 and 14.2.9 - Using pages router - Using non-dynamic server-side rendered routes e.g. `pages/dashboard.tsx` not `pages/blog/[slug].tsx` The below configurations are unaffected: - Deployments using only app router - Deployments on [Vercel](https://vercel.com/) are not affected ### Patches This vulnerability was resolved in Next.js v13.5.7, v14.2.10, and later. We recommend upgrading regardless of whether you can reproduce the issue or not. ### Workarounds There are no official or recommended workarounds for this issue, we recommend that users patch to a safe version. #### Credits - Allam Rachid (zhero_) - Henry Chen --- ### Release Notes
    vercel/next.js (next) ### [`v14.2.10`](https://redirect.github.com/vercel/next.js/compare/v14.2.9...v14.2.10) [Compare Source](https://redirect.github.com/vercel/next.js/compare/v14.2.9...v14.2.10)
    --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). --- .../package-lock.json | 88 +++++++++---------- .../package.json | 2 +- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/search/grounded-generation-playground/package-lock.json b/search/grounded-generation-playground/package-lock.json index cfd4562fd8..b00752b29f 100644 --- a/search/grounded-generation-playground/package-lock.json +++ b/search/grounded-generation-playground/package-lock.json @@ -28,7 +28,7 @@ "jsonstream": "^1.0.3", "jsonstream-next": "^3.0.0", "lucide-react": "^0.439.0", - "next": "14.2.9", + "next": "14.2.10", "react": "^18", "react-dom": "^18", "react-markdown": "^9.0.1", @@ -1569,9 +1569,9 @@ } }, "node_modules/@next/env": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.9.tgz", - "integrity": "sha512-hnDAoDPMii31V0ivibI8p6b023jOF1XblWTVjsDUoZKwnZlaBtJFZKDwFqi22R8r9i6W08dThUWU7Bsh2Rg8Ww==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.10.tgz", + "integrity": "sha512-dZIu93Bf5LUtluBXIv4woQw2cZVZ2DJTjax5/5DOs3lzEOeKLy7GxRSr4caK9/SCPdaW6bCgpye6+n4Dh9oJPw==", "license": "MIT" }, "node_modules/@next/eslint-plugin-next": { @@ -1627,9 +1627,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.9.tgz", - "integrity": 
"sha512-/kfQifl3uLYi3DlwFlzCkgxe6fprJNLzzTUFknq3M5wGYicDIbdGlxUl6oHpVLJpBB/CBY3Y//gO6alz/K4NWA==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.10.tgz", + "integrity": "sha512-V3z10NV+cvMAfxQUMhKgfQnPbjw+Ew3cnr64b0lr8MDiBJs3eLnM6RpGC46nhfMZsiXgQngCJKWGTC/yDcgrDQ==", "cpu": [ "arm64" ], @@ -1643,9 +1643,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.9.tgz", - "integrity": "sha512-tK/RyhCmOCiXQ9IVdFrBbZOf4/1+0RSuJkebXU2uMEsusS51TjIJO4l8ZmEijH9gZa0pJClvmApRHi7JuBqsRw==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.10.tgz", + "integrity": "sha512-Y0TC+FXbFUQ2MQgimJ/7Ina2mXIKhE7F+GUe1SgnzRmwFY3hX2z8nyVCxE82I2RicspdkZnSWMn4oTjIKz4uzA==", "cpu": [ "x64" ], @@ -1659,9 +1659,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.9.tgz", - "integrity": "sha512-tS5eqwsp2nO7mzywRUuFYmefNZsUKM/mTG3exK2jIHv9TEVklE1SByB1KMhFkqlit1PxS9YK1tV8BOV90Wpbrw==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.10.tgz", + "integrity": "sha512-ZfQ7yOy5zyskSj9rFpa0Yd7gkrBnJTkYVSya95hX3zeBG9E55Z6OTNPn1j2BTFWvOVVj65C3T+qsjOyVI9DQpA==", "cpu": [ "arm64" ], @@ -1675,9 +1675,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.9.tgz", - "integrity": "sha512-8svpeTFNAMTUMKQbEzE8qRAwl9o7mNBv7LR1bmSkQvo1oy4WrNyZbhWsldOiKrc4mZ5dfQkGYsI9T75mIFMfeA==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.10.tgz", + "integrity": 
"sha512-n2i5o3y2jpBfXFRxDREr342BGIQCJbdAUi/K4q6Env3aSx8erM9VuKXHw5KNROK9ejFSPf0LhoSkU/ZiNdacpQ==", "cpu": [ "arm64" ], @@ -1691,9 +1691,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.9.tgz", - "integrity": "sha512-0HNulLWpKTB7H5BhHCkEhcRAnWUHeAYCftrrGw3QC18+ZywTdAoPv/zEqKy/0adqt+ks4JDdlgSQ1lNKOKjo0A==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.10.tgz", + "integrity": "sha512-GXvajAWh2woTT0GKEDlkVhFNxhJS/XdDmrVHrPOA83pLzlGPQnixqxD8u3bBB9oATBKB//5e4vpACnx5Vaxdqg==", "cpu": [ "x64" ], @@ -1707,9 +1707,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.9.tgz", - "integrity": "sha512-hhVFViPHLAVUJRNtwwm609p9ozWajOmRvzOZzzKXgiVGwx/CALxlMUeh+M+e0Zj6orENhWLZeilOPHpptuENsA==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.10.tgz", + "integrity": "sha512-opFFN5B0SnO+HTz4Wq4HaylXGFV+iHrVxd3YvREUX9K+xfc4ePbRrxqOuPOFjtSuiVouwe6uLeDtabjEIbkmDA==", "cpu": [ "x64" ], @@ -1723,9 +1723,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.9.tgz", - "integrity": "sha512-p/v6XlOdrk06xfN9z4evLNBqftVQUWiyduQczCwSj7hNh8fWTbzdVxsEiNOcajMXJbQiaX/ZzZdFgKVmmJnnGQ==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.10.tgz", + "integrity": "sha512-9NUzZuR8WiXTvv+EiU/MXdcQ1XUvFixbLIMNQiVHuzs7ZIFrJDLJDaOF1KaqttoTujpcxljM/RNAOmw1GhPPQQ==", "cpu": [ "arm64" ], @@ -1739,9 +1739,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.9", - "resolved": 
"https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.9.tgz", - "integrity": "sha512-IcW9dynWDjMK/0M05E3zopbRen7v0/yEaMZbHFOSS1J/w+8YG3jKywOGZWNp/eCUVtUUXs0PW+7Lpz8uLu+KQA==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.10.tgz", + "integrity": "sha512-fr3aEbSd1GeW3YUMBkWAu4hcdjZ6g4NBl1uku4gAn661tcxd1bHs1THWYzdsbTRLcCKLjrDZlNp6j2HTfrw+Bg==", "cpu": [ "ia32" ], @@ -1755,9 +1755,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.9.tgz", - "integrity": "sha512-gcbpoXyWZdVOBgNa5BRzynrL5UR1nb2ZT38yKgnphYU9UHjeecnylMHntrQiMg/QtONDcJPFC/PmsS47xIRYoA==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.10.tgz", + "integrity": "sha512-UjeVoRGKNL2zfbcQ6fscmgjBAS/inHBh63mjIlfPg/NG8Yn2ztqylXt5qilYb6hoHIwaU2ogHknHWWmahJjgZQ==", "cpu": [ "x64" ], @@ -10297,12 +10297,12 @@ "license": "MIT" }, "node_modules/next": { - "version": "14.2.9", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.9.tgz", - "integrity": "sha512-3CzBNo6BuJnRjcQvRw+irnU1WiuJNZEp+dkzkt91y4jeIDN/Emg95F+takSYiLpJ/HkxClVQRyqiTwYce5IVqw==", + "version": "14.2.10", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.10.tgz", + "integrity": "sha512-sDDExXnh33cY3RkS9JuFEKaS4HmlWmDKP1VJioucCG6z5KuA008DPsDZOzi8UfqEk3Ii+2NCQSJrfbEWtZZfww==", "license": "MIT", "dependencies": { - "@next/env": "14.2.9", + "@next/env": "14.2.10", "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -10317,15 +10317,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.9", - "@next/swc-darwin-x64": "14.2.9", - "@next/swc-linux-arm64-gnu": "14.2.9", - "@next/swc-linux-arm64-musl": "14.2.9", - "@next/swc-linux-x64-gnu": "14.2.9", - "@next/swc-linux-x64-musl": "14.2.9", - 
"@next/swc-win32-arm64-msvc": "14.2.9", - "@next/swc-win32-ia32-msvc": "14.2.9", - "@next/swc-win32-x64-msvc": "14.2.9" + "@next/swc-darwin-arm64": "14.2.10", + "@next/swc-darwin-x64": "14.2.10", + "@next/swc-linux-arm64-gnu": "14.2.10", + "@next/swc-linux-arm64-musl": "14.2.10", + "@next/swc-linux-x64-gnu": "14.2.10", + "@next/swc-linux-x64-musl": "14.2.10", + "@next/swc-win32-arm64-msvc": "14.2.10", + "@next/swc-win32-ia32-msvc": "14.2.10", + "@next/swc-win32-x64-msvc": "14.2.10" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", diff --git a/search/grounded-generation-playground/package.json b/search/grounded-generation-playground/package.json index 98bb7ca808..8599da9309 100644 --- a/search/grounded-generation-playground/package.json +++ b/search/grounded-generation-playground/package.json @@ -34,7 +34,7 @@ "jsonstream": "^1.0.3", "jsonstream-next": "^3.0.0", "lucide-react": "^0.439.0", - "next": "14.2.9", + "next": "14.2.10", "react": "^18", "react-dom": "^18", "react-markdown": "^9.0.1", From bfc21948d160b03544f9ae6da0168800e2136c68 Mon Sep 17 00:00:00 2001 From: Eric Dong Date: Fri, 11 Oct 2024 16:07:45 -0400 Subject: [PATCH 70/76] fix: Remove a problematic scenario for Gemini Flash model (#1250) # Description The PDF analysis with both PDF and images scenario is consistently failing on Flash model so temporary remove the scenario from the example. 
--- .../intro_gemini_1_5_flash.ipynb | 61 ------------------- 1 file changed, 61 deletions(-) diff --git a/gemini/getting-started/intro_gemini_1_5_flash.ipynb b/gemini/getting-started/intro_gemini_1_5_flash.ipynb index 588ffa9767..4257c239ce 100644 --- a/gemini/getting-started/intro_gemini_1_5_flash.ipynb +++ b/gemini/getting-started/intro_gemini_1_5_flash.ipynb @@ -509,67 +509,6 @@ "print(response.text)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "52ltdcv5EsaM" - }, - "outputs": [], - "source": [ - "image_file_path = \"cloud-samples-data/generative-ai/image/cumulative-average.png\"\n", - "image_file_url = f\"https://storage.googleapis.com/{image_file_path}\"\n", - "image_file_uri = f\"gs://{image_file_path}\"\n", - "\n", - "IPython.display.Image(image_file_url, width=450)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "EEmrMpRMHyel" - }, - "outputs": [], - "source": [ - "prompt = \"\"\"\n", - "Task: Answer the following questions based on a PDF document and image file provided in the context.\n", - "\n", - "Instructions:\n", - "- Look through the image and the PDF document carefully and answer the question.\n", - "- Give a short and terse answer to the following question.\n", - "- Do not paraphrase or reformat the text you see in the image.\n", - "- Cite the source of page number for the PDF document provided as context.\n", - "\n", - " Questions:\n", - " - What is in the given image?\n", - " - Is there a similar graph in the given document?\n", - "\n", - "Context:\n", - "\"\"\"\n", - "\n", - "pdf_file = Part.from_uri(pdf_file_uri, mime_type=\"application/pdf\")\n", - "image_file = Part.from_uri(image_file_uri, mime_type=\"image/png\")\n", - "\n", - "contents = [\n", - " pdf_file,\n", - " image_file,\n", - " prompt,\n", - "]\n", - "\n", - "response = model.generate_content(contents)\n", - "print(response.text)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": 
"RIwBUZTyLJh0" - }, - "source": [ - "Gemini 1.5 Flash is able to identify and locate the graph on page 10 from the PDF document.\n" - ] - }, { "cell_type": "markdown", "metadata": { From b997be839d61b33322a2b1c9a0dcb194b6215d22 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Sun, 13 Oct 2024 20:33:30 +0200 Subject: [PATCH 71/76] chore(deps): update terraform terraform-google-modules/log-export/google to v10 (#1247) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [terraform-google-modules/log-export/google](https://registry.terraform.io/modules/terraform-google-modules/log-export/google) ([source](https://redirect.github.com/terraform-google-modules/terraform-google-log-export)) | module | major | `8.1.0` -> `10.0.0` | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
    terraform-google-modules/terraform-google-log-export (terraform-google-modules/log-export/google) ### [`v10.0.0`](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/blob/HEAD/CHANGELOG.md#1000-2024-09-20) [Compare Source](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/compare/v9.0.0...v10.0.0) ##### ⚠ BREAKING CHANGES - Terraform version 1.3+ required and allow max provider version 6.X ([#​235](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/235)) - **TPG >= 5.27:** Add intercept_children support for log sinks ([#​229](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/229)) ##### Features - add support for expiration policy ttl in push topic subscriptio… ([#​226](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/226)) ([59e738a](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/commit/59e738ac6752533c22a51d76be7fbe1dd4c8cbda)) - **TPG >= 5.27:** Add intercept_children support for log sinks ([#​229](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/229)) ([da7a7d4](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/commit/da7a7d446321986aa18031371936da6cae48a7d1)) ##### Bug Fixes - Terraform version 1.3+ required and allow max provider version 6.X ([#​235](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/235)) ([4ad56e1](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/commit/4ad56e18aaa2f92589bcfb03d3890a4e6e6db63e)) ### [`v9.0.0`](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/blob/HEAD/CHANGELOG.md#900-2024-08-16) [Compare Source](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/compare/v8.1.0...v9.0.0) ##### ⚠ BREAKING CHANGES - **TPG>=5.22:** added 
support for soft delete policy in storage sub-module ([#​224](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/224)) ##### Features - **TPG>=5.22:** added support for soft delete policy in storage sub-module ([#​224](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/issues/224)) ([05ea76f](https://redirect.github.com/terraform-google-modules/terraform-google-log-export/commit/05ea76f02d22ba456b7cc4fac5339d4722a96882))
    --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). Co-authored-by: eliasecchig <115624100+eliasecchig@users.noreply.github.com> --- .../deployment/terraform/dev/log_sinks.tf | 4 ++-- .../deployment/terraform/log_sinks.tf | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf index 80fa73e42d..633bc99b86 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/log_sinks.tf @@ -9,7 +9,7 @@ resource "google_project_iam_member" "bigquery_data_editor" { module "log_export_to_bigquery" { source = "terraform-google-modules/log-export/google" - version = "8.1.0" + version = "10.0.0" log_sink_name = var.telemetry_sink_name parent_resource_type = "project" @@ -31,7 +31,7 @@ resource "google_bigquery_dataset" "feedback_dataset" { module "feedback_export_to_bigquery" { source = "terraform-google-modules/log-export/google" - version = "8.1.0" + version = "10.0.0" log_sink_name = var.feedback_sink_name parent_resource_type = "project" parent_resource_id = var.dev_project_id diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf 
b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf index 676d68215c..8119a242f1 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/log_sinks.tf @@ -10,7 +10,7 @@ module "log_export_to_bigquery" { for_each = local.project_ids source = "terraform-google-modules/log-export/google" - version = "8.1.0" + version = "10.0.0" log_sink_name = var.telemetry_sink_name parent_resource_type = "project" @@ -37,7 +37,7 @@ module "feedback_export_to_bigquery" { for_each = local.project_ids source = "terraform-google-modules/log-export/google" - version = "8.1.0" + version = "10.0.0" log_sink_name = var.feedback_sink_name parent_resource_type = "project" parent_resource_id = each.value From 619052218fd7454fde86e9fc796c31f7c704e5ee Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Sun, 13 Oct 2024 20:38:27 +0200 Subject: [PATCH 72/76] chore(deps): update terraform google to v6 (#1246) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [google](https://registry.terraform.io/providers/hashicorp/google) ([source](https://redirect.github.com/hashicorp/terraform-provider-google)) | required_provider | major | `>= 3.53.0, < 6.0.0` -> `< 7.0.0` | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
    hashicorp/terraform-provider-google (google) ### [`v6.6.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#660-October-7-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.5.0...v6.6.0) FEATURES: - **New Resource:** `google_dataproc_batch` ([#​19686](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19686)) - **New Resource:** `google_healthcare_pipeline_job` ([#​19717](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19717)) - **New Resource:** `google_site_verification_owner` ([#​19641](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19641)) IMPROVEMENTS: - assuredworkloads: added `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS` and `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT` enum values to `compliance_regime` in the `google_assuredworkload_workload` resource ([#​19714](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19714)) - compute: added ` bgp_best_path_selection_mode `,`bgp_bps_always_compare_med` and ` bgp_bps_inter_region_cost ` fields to `google_compute_network` resource ([#​19708](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19708)) - compute: added ` next_hop_origin `,` next_hop_med ` and ` next_hop_inter_region_cost ` output fields to `google_compute_route` resource ([#​19708](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19708)) - compute: added enum `STATEFUL_COOKIE_AFFINITY` and `strong_session_affinity_cookie` field to `google_compute_backend_service` and `google_compute_region_backend_service` resource ([#​19665](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19665)) - compute: moved `TDX` instance option for `confidential_instance_type` in `google_compute_instance` from Beta to GA ([#​19706](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19706)) - containeraws: added `kubelet_config` 
field group to the `google_container_aws_node_pool` resource ([#​19714](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19714)) - pubsub: added GCS ingestion settings and platform log settings to `google_pubsub_topic` resource ([#​19669](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19669)) - sourcerepo: added `create_ignore_already_exists` field to `google_sourcerepo_repository` resource ([#​19716](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19716)) - sql: added in-place update support for `settings.time_zone` in `google_sql_database_instance` resource ([#​19654](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19654)) - tags: increased maximum accepted input length for the `short_name` field in `google_tags_tag_key` and `google_tags_tag_value` resources ([#​19712](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19712)) BUG FIXES: - bigquery: fixed `google_bigquery_dataset_iam_member` to be able to delete itself and overwrite the existing iam members for bigquery dataset keeping the authorized datasets as they are. 
([#​19682](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19682)) - bigquery: fixed an error which could occur with service account field values containing non-lower-case characters in `google_bigquery_dataset_access` ([#​19705](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19705)) - compute: fixed an issue where the `boot_disk.initialize_params.resource_policies` field in `google_compute_instance` forced a resource recreation when used in combination with `google_compute_disk_resource_policy_attachment` ([#​19692](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19692)) - compute: fixed the issue that `labels` is not set when creating the resource `google_compute_interconnect` ([#​19632](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19632)) - tags: removed `google_tags_location_tag_binding` resource from the Terraform state when its parent resource has been removed outside of Terraform ([#​19693](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19693)) - workbench: fixed a bug in the `google_workbench_instance` resource where the removal of `labels` was not functioning as expected. ([#​19620](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19620)) ### [`v6.5.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#650-September-30-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.4.0...v6.5.0) DEPRECATIONS: - compute: deprecated `macsec.pre_shared_keys.fail_open` field in `google_compute_interconnect` resource. 
Use the new `macsec.fail_open` field instead ([#​19572](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19572)) FEATURES: - **New Data Source:** `google_compute_region_instance_group_manager` ([#​19589](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19589)) - **New Data Source:** `google_privileged_access_manager_entitlement` ([#​19580](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19580)) - **New Data Source:** `google_secret_manager_regional_secret_version_access` ([#​19538](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19538)) - **New Data Source:** `google_secret_manager_regional_secret_version` ([#​19514](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19514)) - **New Data Source:** `google_secret_manager_regional_secrets` ([#​19532](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19532)) - **New Resource:** `google_compute_router_nat_address` ([#​19550](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19550)) - **New Resource:** `google_logging_log_scope` ([#​19559](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19559)) IMPROVEMENTS: - apigee: added `activate` field to `google_apigee_nat_address` resource ([#​19591](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19591)) - bigquery: added `biglake_configuration` field to `google_bigquery_table` resource to support BigLake Managed Tables ([#​19541](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19541)) - cloudrunv2: promoted `scaling` field in `google_cloud_run_v2_service` resource to GA ([#​19588](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19588)) - composer: promoted `config.workloads_config.cloud_data_lineage_integration` field in `google_composer_environment` resource to GA ([#​19612](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19612)) - 
compute: added `existing_reservations` field to `google_compute_region_commitment` resource ([#​19585](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19585)) - compute: added `hostname` field to `google_compute_instance` data source ([#​19607](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19607)) - compute: added `initial_nat_ip` field to `google_compute_router_nat` resource ([#​19550](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19550)) - compute: added `macsec.fail_open` field to `google_compute_interconnect` resource ([#​19572](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19572)) - compute: added `SUSPENDED` as a possible value to `desired_state` field in `google_compute_instance` resource ([#​19586](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19586)) - compute: added import support for `projects/{{project}}/meta-data/{{key}}` format for `google_compute_project_metadata_item` resource ([#​19613](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19613)) - compute: marked `customer_name` and `location` fields as optional in `google_compute_interconnect` resource to support cross cloud interconnect ([#​19619](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19619)) - container: added `linux_node_config.hugepages_config` field to `google_container_node_pool` resource ([#​19521](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19521)) - container: promoted `gcfs_config` field in `google_container_cluster` resource to GA ([#​19617](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19617)) - looker: added `psc_enabled` and `psc_config` fields to `google_looker_instance` resource ([#​19523](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19523)) - networkconnectivity: added `include_import_ranges` field to `google_network_connectivity_spoke` resource for 
`linked_vpn_tunnels`, `linked_interconnect_attachments` and `linked_router_appliance_instances` ([#​19530](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19530)) - secretmanagerregional: added `version_aliases` field to `google_secret_manager_regional_secret` resource ([#​19514](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19514)) - workbench: increased create timeout to 20 minutes for `google_workbench_instance` resource ([#​19551](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19551)) BUG FIXES: - bigquery: fixed in-place update of `google_bigquery_table` resource when `external_data_configuration.schema` field is set ([#​19558](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19558)) - bigquerydatapolicy: fixed permadiff on `policy_tag` field in `google_bigquery_datapolicy_data_policy` resource ([#​19563](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19563)) - composer: fixed `storage_config.bucket` field to support a bucket name with or without "gs://" prefix ([#​19552](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19552)) - container: added support for setting `addons_config.gcp_filestore_csi_driver_config` and `enable_autopilot` in the same `google_container_cluster` ([#​19590](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19590)) - container: fixed `node_config.kubelet_config` updates in `google_container_cluster` resource ([#​19562](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19562)) - container: fixed a bug where specifying `node_pool_defaults.node_config_defaults` with `enable_autopilot = true` would cause `google_container_cluster` resource creation failure ([#​19543](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19543)) - workbench: fixed a bug in the `google_workbench_instance` resource where the removal of `labels` was not functioning as expected 
([#​19620](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19620)) ### [`v6.4.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#640-September-23-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.3.0...v6.4.0) DEPRECATIONS: - securitycenterv2: deprecated `google_scc_v2_organization_scc_big_query_exports`. Use `google_scc_v2_organization_scc_big_query_export` instead. ([#​19457](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19457)) FEATURES: - **New Data Source:** `google_secret_manager_regional_secret_version` ([#​19514](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19514)) - **New Data Source:** `google_secret_manager_regional_secret` ([#​19491](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19491)) - **New Resource:** `google_database_migration_service_migration_job` ([#​19488](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19488)) - **New Resource:** `google_discovery_engine_target_site` ([#​19469](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19469)) - **New Resource:** `google_healthcare_workspace` ([#​19476](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19476)) - **New Resource:** `google_scc_folder_scc_big_query_export` ([#​19480](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19480)) - **New Resource:** `google_scc_organization_scc_big_query_export` ([#​19465](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19465)) - **New Resource:** `google_scc_project_scc_big_query_export` ([#​19466](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19466)) - **New Resource:** `google_scc_v2_organization_scc_big_query_export` ([#​19457](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19457)) - **New Resource:** 
`google_secret_manager_regional_secret_version` ([#​19504](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19504)) - **New Resource:** `google_secret_manager_regional_secret` ([#​19461](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19461)) - **New Resource:** `google_site_verification_web_resource` ([#​19477](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19477)) - **New Resource:** `google_spanner_backup_schedule` ([#​19449](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19449)) IMPROVEMENTS: - alloydb: added `enable_outbound_public_ip` field to `google_alloydb_instance` resource ([#​19444](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19444)) - apigee: added in-place update for `consumer_accept_list` field in `google_apigee_instance` resource ([#​19442](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19442)) - compute: added `interface` field to `google_compute_attached_disk` resource ([#​19440](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19440)) - compute: added in-place update in `google_compute_interconnect` resource, except for `remote_location` and `requested_features` fields ([#​19508](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19508)) - filestore: added `deletion_protection_enabled` and `deletion_protection_reason` fields to `google_filestore_instance` resource ([#​19446](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19446)) - looker: added `fips_enabled` field to `google_looker_instance` resource ([#​19511](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19511)) - metastore: added `deletion_protection` field to `google_dataproc_metastore_service` resource ([#​19505](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19505)) - netapp: added `allow_auto_tiering` field to `google_netapp_storage_pool` resource 
([#​19454](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19454)) - netapp: added `tiering_policy` field to `google_netapp_volume` resource ([#​19454](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19454)) - secretmanagerregional: added `version_aliases` field to `google_secret_manager_regional_secret` resource ([#​19514](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19514)) - spanner: added `edition` field to `google_spanner_instance` resource ([#​19449](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19449)) BUG FIXES: - compute: fixed a permadiff on `iap` field in `google_compute_backend` and `google_compute_region_backend` resources ([#​19509](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19509)) - container: fixed a bug where specifying `node_pool_defaults.node_config_defaults` with `enable_autopilot = true` will cause `google_container_cluster` resource creation failure ([#​19543](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19543)) - container: fixed a permadiff on `node_config.gcfs_config` field in `google_container_cluster` and `google_container_node_pool` resources ([#​19512](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19512)) - container: fixed the in-place update for `node_config.gcfs_config` field in `google_container_cluster` and `google_container_node_pool` resources ([#​19512](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19512)) - container: made `node_config.kubelet_config.cpu_manager_policy` field optional to fix its update in `google_container_cluster` resource ([#​19464](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19464)) - dns: fixed a permadiff on `dnssec_config` field in `google_dns_managed_zone` resource ([#​19456](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19456)) - pubsub: allowed `filter` field to contain 
line breaks in `google_pubsub_subscription` resource ([#​19451](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19451)) ### [`v6.3.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#630-September-16-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.2.0...v6.3.0) FEATURES: - **New Data Source:** `google_bigquery_tables` ([#​19402](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19402)) - **New Resource:** `google_developer_connect_connection` ([#​19431](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19431)) - **New Resource:** `google_developer_connect_git_repository_link` ([#​19431](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19431)) - **New Resource:** `google_memorystore_instance` ([#​19398](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19398)) IMPROVEMENTS: - compute: added `connected_endpoints.consumer_network` and `connected_endpoints.psc_connection_id` fields to `google_compute_service_attachment` resource ([#​19426](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19426)) - compute: added field `http_keep_alive_timeout_sec` to `google_region_compute_target_https_proxy` and `google_region_compute_target_http_proxy` resources ([#​19432](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19432)) - compute: added support for `boot_disk.initialize_params.resource_policies` in `google_compute_instance` and `google_instance_template` ([#​19407](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19407)) - container: added `storage_pools` to `node_config` in `google_container_cluster` and `google_container_node_pool` ([#​19423](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19423)) - containerattached: added `security_posture_config` field to `google_container_attached_cluster` resource 
([#​19411](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19411)) - netapp: added `large_capacity` and `multiple_endpoints` to `google_netapp_volume` resource ([#​19384](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19384)) - resourcemanager: added `tags` field to `google_folder` to allow setting tags for folders at creation time ([#​19380](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19380)) BUG FIXES: - compute: setting `network_ip` to "" will no longer cause diff and will be treated the same as `null` ([#​19400](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19400)) - dataproc: updated `google_dataproc_cluster` to protect against handling nil `kerberos_config` values ([#​19401](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19401)) - dns: added a mutex to `google_dns_record_set` to prevent conflicts when multiple resources attempt to operate on the same record set ([#​19416](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19416)) - managedkafka: added 5 second wait post `google_managed_kafka_topic` creation to fix eventual consistency errors ([#​19429](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19429)) ### [`v6.2.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#620-September-9-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.1.0...v6.2.0) FEATURES: - **New Data Source:** `google_certificate_manager_certificates` ([#​19361](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19361)) - **New Resource:** `google_network_security_server_tls_policy` ([#​19314](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19314)) - **New Resource:** `google_scc_v2_folder_scc_big_query_export` ([#​19327](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19327)) - **New Resource:** 
`google_scc_v2_project_scc_big_query_export` ([#​19311](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19311)) IMPROVEMENTS: - assuredworkload: added field `partner_service_billing_account` to `google_assured_workloads_workload` ([#​19358](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19358)) - bigtable: added support for `column_family.type` in `google_bigtable_table` ([#​19302](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19302)) - cloudrun: promoted support for nfs and csi volumes (for Cloud Storage FUSE) for `google_cloud_run_service` to GA ([#​19359](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19359)) - cloudrunv2: promoted support for nfs and gcs volumes for `google_cloud_run_v2_job` to GA ([#​19359](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19359)) - compute: added `boot_disk.interface` field to `google_compute_instance` resource ([#​19319](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19319)) - container: added `node_pool_auto_config.node_kublet_config.insecure_kubelet_readonly_port_enabled` field to `google_container_cluster`. ([#​19320](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19320)) - container: added `insecure_kubelet_readonly_port_enabled` to `node_pool.node_config.kubelet_config` and `node_config.kubelet_config` in `google_container_node_pool` resource. ([#​19312](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19312)) - container: added `insecure_kubelet_readonly_port_enabled` to `node_pool_defaults.node_config_defaults`, `node_pool.node_config.kubelet_config`, and `node_config.kubelet_config` in `google_container_cluster` resource. 
([#​19312](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19312)) - container: added support for in-place updates for `google_compute_node_pool.node_config.gcfs_config` and `google_container_cluster.node_config.gcfs_cluster` and `google_container_cluster.node_pool.node_config.gcfs_cluster` ([#​19365](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19365)) - container: promoted the `additive_vpc_scope_dns_domain` field on the `google_container_cluster` resource to GA ([#​19313](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19313)) - iambeta: added `x509` field to ` google_iam_workload_identity_pool_provider ` resource ([#​19375](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19375)) - networkconnectivity: added `include_export_ranges` to `google_network_connectivity_spoke` ([#​19346](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19346)) - pubsub: added `cloud_storage_config.max_messages` and `cloud_storage_config.avro_config.use_topic_schema` fields to `google_pubsub_subscription` resource ([#​19338](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19338)) - redis: added the `maintenance_policy` field to the `google_redis_cluster` resource ([#​19341](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19341)) - resourcemanager: added `tags` field to `google_project` to allow setting tags for projects at creation time ([#​19351](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19351)) - securitycenter: added support for empty `streaming_config.filter` values in `google_scc_notification_config` resources ([#​19369](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19369)) BUG FIXES: - compute: fixed `google_compute_interconnect` to support correct `available_features` option of `IF_MACSEC` ([#​19330](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19330)) - 
compute: fixed a bug where `advertised_route_priority` was accidentally set to 0 during updates in `google_compute_router_peer` ([#​19366](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19366)) - compute: fixed a permadiff caused by setting `start_time` in an incorrect H:mm format in `google_compute_resource_policies` resources ([#​19297](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19297)) - compute: fixed `network_interface.subnetwork_project` validation to match with the project in `network_interface.subnetwork` field when `network_interface.subnetwork` has full self_link in `google_compute_instance` resource ([#​19348](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19348)) - container: removed unnecessary force replacement in node pool `gcfs_config` ([#​19365](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19365) - kms: updated the `google_kms_autokey_config` resource's `folder` field to accept values that are either full resource names (`folders/{folder_id}`) or just the folder id (`{folder_id}` only) ([#​19364](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19364))) - storage: added retry support for 429 errors in `google_storage_bucket` resource ([#​19353](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19353)) ### [`v6.1.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#610-September-4-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.0.1...v6.1.0) FEATURES: - **New Data Source:** `google_kms_crypto_key_latest_version` ([#​19249](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19249)) - **New Data Source:** `google_kms_crypto_key_versions` ([#​19241](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19241)) IMPROVEMENTS: - databasemigrationservice: added support in 
`google_database_migration_service_connection_profile` for creating DMS connection profiles that link to existing Cloud SQL instances/AlloyDB clusters. ([#​19291](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19291)) - alloydb: added `subscription_type` and `trial_metadata` field to `google_alloydb_cluster` resource ([#​19262](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19262)) - bigquery: added `encryption_configuration` field to `google_bigquery_data_transfer_config` resource ([#​19267](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19267)) - bigqueryanalyticshub: added `selected_resources`, and `restrict_direct_table_access` to `google_bigquery_analytics_hub_listing` resource ([#​19244](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19244)) - bigqueryanalyticshub: added `sharing_environment_config` to `google_bigquery_analytics_hub_data_exchange` resource ([#​19244](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19244)) - cloudtasks: added `http_target` field to `google_cloud_tasks_queue` resource ([#​19253](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19253)) - compute: added `accelerators` field to `google_compute_node_template` resource ([#​19292](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19292)) - compute: allowed disabling `server_tls_policy` during update in `google_compute_target_https_proxy` resources ([#​19233](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19233)) - container: added `secret_manager_config` field to `google_container_cluster` resource ([#​19288](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19288)) - datastream: added `transaction_logs` and `change_tables` to the `datastream_stream` resource ([#​19248](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19248)) - discoveryengine: added `chunking_config` and 
`layout_parsing_config` fields to `google_discovery_engine_data_store` resource ([#​19274](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19274)) - dlp: added `inspect_template_modified_cadence` field to `big_query_target` and `cloud_sql_target` in `google_data_loss_prevention_discovery_config` resource ([#​19282](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19282)) - dlp: added `tag_resources` field to `google_data_loss_prevention_discovery_config` resource ([#​19282](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19282)) - networksecurity: promoted `google_network_security_client_tls_policy` to GA ([#​19293](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19293)) BUG FIXES: - bigquery: fixed an error which could occur with email field values containing non-lower-case characters in `google_bigquery_dataset_access` resource ([#​19259](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19259)) - bigqueryanalyticshub: made `bigquery_dataset` immutable in `google_bigquery_analytics_hub_listing` as it was not updatable in the API. Now modifying the field in Terraform will correctly recreate the resource rather than causing Terraform to report it would attempt an invalid update. 
([#​19244](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19244)) - container: fixed update inconsistency in `google_container_cluster` resource ([#​19247](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19247)) - pubsub: fixed a validation bug that didn't allow empty filter definitions for `google_pubsub_subscription` resources ([#​19284](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19284)) - resourcemanager: fixed a bug where data.google_client_config failed silently when inadequate credentials were used to configure the provider ([#​19286](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19286)) - sql: fixed importing `google_sql_user` where `host` is an IPv4 CIDR ([#​19243](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19243)) - sql: fixed overwriting of `name` field for IAM Group user in `google_sql_user` resource ([#​19234](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19234)) ### [`v6.0.1`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#601-August-26-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v6.0.0...v6.0.1) BREAKING CHANGES: - sql: removed `settings.ip_configuration.require_ssl` from `google_sql_database_instance` in favor of `settings.ip_configuration.ssl_mode`. This field was intended to be removed in 6.0.0. 
([#​19263](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19263)) ### [`v6.0.0`](https://redirect.github.com/hashicorp/terraform-provider-google/blob/HEAD/CHANGELOG.md#600-August-26-2024) [Compare Source](https://redirect.github.com/hashicorp/terraform-provider-google/compare/v5.44.1...v6.0.0) [Terraform Google Provider 6.0.0 Upgrade Guide](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version\_6\_upgrade) BREAKING CHANGES: - provider: changed provider labels to add the `goog-terraform-provisioned: true` label by default. ([#​19190](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19190)) - activedirectory: added `deletion_protection` field to `google_active_directory_domain` resource. This field defaults to `true`, preventing accidental deletions. To delete the resource, you must first set `deletion_protection = false` before destroying the resource. ([#​18906](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18906)) - alloydb: removed `network` in `google_alloy_db_cluster`. Use `network_config.network` instead. ([#​19181](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19181)) - bigquery: added client-side validation to prevent table view creation if schema contains required fields for `google_bigquery_table` resource ([#​18767](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18767)) - bigquery: removed `allow_resource_tags_on_deletion` from `google_bigquery_table`. Resource tags are now always allowed on table deletion. 
([#​19077](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19077)) - bigqueryreservation: removed `multi_region_auxiliary` from `google_bigquery_reservation` ([#​18922](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18922)) - billing: revised the format of `id` for `google_billing_project_info` ([#​18823](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18823)) - cloudrunv2: added `deletion_protection` field to `google_cloudrunv2_service`. This field defaults to `true`, preventing accidental deletions. To delete the resource, you must first set `deletion_protection = false` before destroying the resource.([#​19019](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19019)) - cloudrunv2: changed `liveness_probe` to no longer infer a default value from api on `google_cloud_run_v2_service`. Removing this field and applying the change will now remove liveness probe from the Cloud Run service. ([#​18764](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18764)) - cloudrunv2: retyped `containers.env` to SET from ARRAY for `google_cloud_run_v2_service` and `google_cloud_run_v2_job`. ([#​18855](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18855)) - composer: `ip_allocation_policy = []` in `google_composer_environment` is no longer valid configuration. Removing the field from configuration should not produce a diff. ([#​19207](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19207)) - compute: added new required field `enabled` in `google_compute_backend_service` and `google_compute_region_backend_service` ([#​18772](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18772)) - compute: changed `certifcate_id` in `google_compute_managed_ssl_certificate` to correctly be output only. 
([#​19069](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19069)) - compute: revised and in some cases removed default values of `connection_draining_timeout_sec`, `balancing_mode` and `outlier_detection` in `google_compute_region_backend_service` and `google_compute_backend_service`. ([#​18720](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18720)) - compute: revised the format of `id` for `compute_network_endpoints` ([#​18844](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18844)) - compute: `guest_accelerator = []` is no longer valid configuration in `google_compute_instance`. To explicitly set an empty list of objects, set guest_accelerator.count = 0. ([#​19207](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19207)) - compute: `google_compute_instance_from_template` and `google_compute_instance_from_machine_image` `network_interface.alias_ip_range, network_interface.access_config, attached_disk, guest_accelerator, service_account, scratch_disk` can no longer be set to an empty block `[]`. Removing the fields from configuration should not produce a diff. ([#​19207](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19207)) - compute: `secondary_ip_ranges = []` in `google_compute_subnetwork` is no longer valid configuration. To set an explicitly empty list, use `send_secondary_ip_range_if_empty` and completely remove `secondary_ip_range` from config. ([#​19207](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19207)) - container: made `advanced_datapath_observability_config.enable_relay` required in `google_container_cluster` ([#​19060](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19060)) - container: removed deprecated field `advanced_datapath_observability_config.relay_mode` from `google_container_cluster` resource. Users are expected to use `enable_relay` field instead. 
([#​19060](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19060)) - container: three label-related fields are now in `google_container_cluster` resource. `resource_labels` field is non-authoritative and only manages the labels defined by the users on the resource through Terraform. The new output-only `terraform_labels` field merges the labels defined by the users on the resource through Terraform and the default labels configured on the provider. The new output-only `effective_labels` field lists all of labels present on the resource in GCP, including the labels configured through Terraform, the system, and other clients. ([#​19062](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19062)) - container: made three fields `resource_labels`, `terraform_labels`, and `effective_labels` be present in `google_container_cluster` datasources. All three fields will have all of labels present on the resource in GCP including the labels configured through Terraform, the system, and other clients, equivalent to `effective_labels` on the resource. ([#​19062](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19062)) - container: `guest_accelerator = []` is no longer valid configuration in `google_container_cluster` and `google_container_node_pool`. To explicitly set an empty list of objects, set guest_accelerator.count = 0. ([#​19207](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19207)) - container: `guest_accelerator.gpu_driver_installation_config = []` and `guest_accelerator.gpu_sharing_config = []` are no longer valid configuration in `google_container_cluster` and `google_container_node_pool`. Removing the fields from configuration should not produce a diff. 
([#​19207](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19207)) - datastore: removed `google_datastore_index` in favor of `google_firestore_index` ([#​19160](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19160)) - edgenetwork: three label-related fields are now in ` google_edgenetwork_network ` and `google_edgenetwork_subnet` resources. `labels` field is non-authoritative and only manages the labels defined by the users on the resource through Terraform. The new output-only `terraform_labels` field merges the labels defined by the users on the resource through Terraform and the default labels configured on the provider. The new output-only `effective_labels` field lists all of labels present on the resource in GCP, including the labels configured through Terraform, the system, and other clients. ([#​19062](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19062)) - identityplatform: removed resource `google_identity_platform_project_default_config` in favor of `google_identity_platform_project_config` ([#​18992](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18992)) - pubsub: allowed `schema_settings` in `google_pubsub_topic` to be removed ([#​18631](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18631)) - integrations: removed `create_sample_workflows` and `provision_gmek` from `google_integrations_client` ([#​19148](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19148)) - redis: added a `deletion_protection_enabled` field to the `google_redis_cluster` resource. This field defaults to `true`, preventing accidental deletions. To delete the resource, you must first set `deletion_protection_enabled = false` before destroying the resource. 
([#​19173](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19173)) - resourcemanager: added `deletion_protection` field to `google_folder` to make deleting them require an explicit intent. Folder resources now cannot be destroyed unless `deletion_protection = false` is set for the resource. ([#​19021](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19021)) - resourcemanager: made `deletion_policy` in `google_project` 'PREVENT' by default. This makes deleting them require an explicit intent. `google_project` resources cannot be destroyed unless `deletion_policy` is set to 'ABANDON' or 'DELETE' for the resource. ([#​19114](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19114)) - sql: removed `settings.ip_configuration.require_ssl` in `google_sql_database_instance`. Please use `settings.ip_configuration.ssl_mode` instead. ([#​18843](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18843)) - storage: removed `no_age` field from `lifecycle_rule.condition` in the `google_storage_bucket` resource ([#​19048](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19048)) - vpcaccess: removed default values for `min_throughput` and `min_instances` fields on `google_vpc_access_connector` and made them default to values returned from the API when not provided by users ([#​18697](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18697)) - vpcaccess: added a conflicting fields restriction between `min_throughput` and `min_instances` fields on `google_vpc_access_connector` ([#​18697](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18697)) - vpcaccess: added a conflicting fields restriction between `max_throughput` and `max_instances` fields on `google_vpc_access_connector` ([#​18697](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18697)) - workstation: defaulted `host.gce_instance.disable_ssh` to true for 
`google_workstations_workstation_config` ([#​19101](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19101)) IMPROVEMENTS: - compute: added fields `reserved_internal_range` and `secondary_ip_ranges[].reserved_internal_range` to `google_compute_subnetwork` resource ([#​19151](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19151)) - compute: changed the behavior of `name_prefix` in multiple Compute resources to allow for a longer max length of 54 characters. See the upgrade guide and resource documentation for more details. ([#​19152](https://redirect.github.com/hashicorp/terraform-provider-google/pull/19152)) BUG FIXES: - compute: fixed an issue regarding sending `enabled` field by default for null `iap` message in `google_compute_backend_service` and `google_compute_region_backend_service` ([#​18772](https://redirect.github.com/hashicorp/terraform-provider-google/pull/18772))
    --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). Co-authored-by: eliasecchig <115624100+eliasecchig@users.noreply.github.com> --- .../deployment/terraform/dev/storage.tf | 2 +- .../e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf index 20d7a1ecce..b0b40e884e 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/dev/storage.tf @@ -3,7 +3,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 3.53.0, < 6.0.0" + version = "< 7.0.0" } } } diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf index a6f994cb98..56fd65d9ff 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/deployment/terraform/storage.tf @@ -3,7 +3,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = ">= 3.53.0, < 6.0.0" + version = "< 7.0.0" } } } From b0c5628885d76c464a0cb4ea1e99df263b38762d Mon 
Sep 17 00:00:00 2001 From: eliasecchig <115624100+eliasecchig@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:49:13 +0200 Subject: [PATCH 73/76] fix: README.md GenAI starter pack - setup quota project (#1256) # Description quick fix to guide the user to setup their quota project before using the starter pack. --- gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md index dfadbfaef2..8d0190b5fb 100644 --- a/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md +++ b/gemini/sample-apps/e2e-gen-ai-app-starter-pack/README.md @@ -117,6 +117,7 @@ export PROJECT_ID="YOUR_PROJECT_ID" export REGION="YOUR_REGION" gcloud config set project $PROJECT_ID gcloud config set region $REGION +gcloud auth application-default set-quota-project $PROJECT_ID ``` ## Commands From 84da8254e196fe74f0a9aadff7d4e1baf89fe93a Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 14 Oct 2024 16:51:12 +0200 Subject: [PATCH 74/76] chore(deps): update dependency eslint to v9 (#1252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [eslint](https://eslint.org) ([source](https://redirect.github.com/eslint/eslint)) | [`^8.0.0` -> `^9.0.0`](https://renovatebot.com/diffs/npm/eslint/8.57.1/9.12.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/eslint/9.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/eslint/9.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/eslint/8.57.1/9.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/eslint/8.57.1/9.12.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Release Notes
    eslint/eslint (eslint) ### [`v9.12.0`](https://redirect.github.com/eslint/eslint/releases/tag/v9.12.0) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.11.1...v9.12.0) #### Features - [`5a6a053`](https://redirect.github.com/eslint/eslint/commit/5a6a05321ca34480c780be8c2cb7946e4c299001) feat: update to `jiti` v2 ([#​18954](https://redirect.github.com/eslint/eslint/issues/18954)) (Arya Emami) - [`17a07fb`](https://redirect.github.com/eslint/eslint/commit/17a07fb548ecce24b88e8b2b07491c24ed1111a9) feat: Hooks for test cases (RuleTester) ([#​18771](https://redirect.github.com/eslint/eslint/issues/18771)) (Anna Bocharova) - [`2ff0e51`](https://redirect.github.com/eslint/eslint/commit/2ff0e51cedaab967b7ce383437f64b4a6df8608d) feat: Implement alternate config lookup ([#​18742](https://redirect.github.com/eslint/eslint/issues/18742)) (Nicholas C. Zakas) - [`2d17453`](https://redirect.github.com/eslint/eslint/commit/2d174532ae96bcaecf6fd7de78755164378b3a2d) feat: Implement modified cyclomatic complexity ([#​18896](https://redirect.github.com/eslint/eslint/issues/18896)) (Dmitry Pashkevich) #### Bug Fixes - [`ea380ca`](https://redirect.github.com/eslint/eslint/commit/ea380cac6f598c86b25a2726c2783636c4169957) fix: Upgrade retry to avoid EMFILE errors ([#​18986](https://redirect.github.com/eslint/eslint/issues/18986)) (Nicholas C. Zakas) - [`fdd6319`](https://redirect.github.com/eslint/eslint/commit/fdd631964aee250bc5520770bc1fc3f2f2872813) fix: Issues with type definitions ([#​18940](https://redirect.github.com/eslint/eslint/issues/18940)) (Arya Emami) #### Documentation - [`ecbd522`](https://redirect.github.com/eslint/eslint/commit/ecbd52291d7c118b77016c6bf1c60b7d263c44f0) docs: Mention code explorer ([#​18978](https://redirect.github.com/eslint/eslint/issues/18978)) (Nicholas C. 
Zakas) - [`7ea4ecc`](https://redirect.github.com/eslint/eslint/commit/7ea4ecc6e3320a74c960cb78acc94c0140d15f55) docs: Clarifying the Use of Meta Objects ([#​18697](https://redirect.github.com/eslint/eslint/issues/18697)) (Amaresh S M) - [`d3e4b2e`](https://redirect.github.com/eslint/eslint/commit/d3e4b2ea4a8f76d4d49345c242f013f49635274f) docs: Clarify how to exclude `.js` files ([#​18976](https://redirect.github.com/eslint/eslint/issues/18976)) (Milos Djermanovic) - [`57232ff`](https://redirect.github.com/eslint/eslint/commit/57232ff3d50412586df094f052b47adb38f8d9ae) docs: Mention plugin-kit in language docs ([#​18973](https://redirect.github.com/eslint/eslint/issues/18973)) (Nicholas C. Zakas) - [`b80ed00`](https://redirect.github.com/eslint/eslint/commit/b80ed007cefee086db1ff17cde9f7dd6690459f0) docs: Update README (GitHub Actions Bot) - [`cb69ab3`](https://redirect.github.com/eslint/eslint/commit/cb69ab374c149eb725b2fc5a8f0ff33fd7268a46) docs: Update README (GitHub Actions Bot) - [`7fb0d95`](https://redirect.github.com/eslint/eslint/commit/7fb0d957c102be499d5358a74928e0ea93913371) docs: Update README (GitHub Actions Bot) - [`493348a`](https://redirect.github.com/eslint/eslint/commit/493348a9a5dcca29d7fbbe13c67ce13a7a38413b) docs: Update README (GitHub Actions Bot) - [`87a582c`](https://redirect.github.com/eslint/eslint/commit/87a582c8b537d133c140781aa9e3ff0201a3c10f) docs: fix typo in `id-match` rule ([#​18944](https://redirect.github.com/eslint/eslint/issues/18944)) (Jay) #### Chores - [`555aafd`](https://redirect.github.com/eslint/eslint/commit/555aafd06f6dddc743acff06111dc72dd8ea4c4e) chore: upgrade to `@eslint/js@9.12.0` ([#​18987](https://redirect.github.com/eslint/eslint/issues/18987)) (Francesco Trotta) - [`873ae60`](https://redirect.github.com/eslint/eslint/commit/873ae608c15a0a386f022076b5aab6112b56b59b) chore: package.json update for [@​eslint/js](https://redirect.github.com/eslint/js) release (Jenkins) - 
[`d0a5414`](https://redirect.github.com/eslint/eslint/commit/d0a5414c30421e5dbe313790502dbf13b9330fef) refactor: replace strip-ansi with native module ([#​18982](https://redirect.github.com/eslint/eslint/issues/18982)) (Cristopher) - [`b827029`](https://redirect.github.com/eslint/eslint/commit/b8270299abe777bb80a065d537aa1d4be74be705) chore: Enable JSON5 linting ([#​18979](https://redirect.github.com/eslint/eslint/issues/18979)) (Milos Djermanovic) - [`8f55ca2`](https://redirect.github.com/eslint/eslint/commit/8f55ca22d94c1b0ff3be323b97949edef8d880b0) chore: Upgrade espree, eslint-visitor-keys, eslint-scope ([#​18962](https://redirect.github.com/eslint/eslint/issues/18962)) (Nicholas C. Zakas) - [`c1a2725`](https://redirect.github.com/eslint/eslint/commit/c1a2725e9c776d6845d94c866c7f7b1fe0315090) chore: update dependency mocha to ^10.7.3 ([#​18945](https://redirect.github.com/eslint/eslint/issues/18945)) (Milos Djermanovic) ### [`v9.11.1`](https://redirect.github.com/eslint/eslint/compare/v9.11.0...69e94597caa92c9b9f4071f8c9ed4a03772fa6de) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.11.0...v9.11.1) ### [`v9.11.0`](https://redirect.github.com/eslint/eslint/releases/tag/v9.11.0) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.10.0...v9.11.0) #### Features - [`ec30c73`](https://redirect.github.com/eslint/eslint/commit/ec30c7349e0bc2c37465a036e8c7ea3318ac2328) feat: add "eslint/universal" to export `Linter` ([#​18883](https://redirect.github.com/eslint/eslint/issues/18883)) (唯然) - [`c591da6`](https://redirect.github.com/eslint/eslint/commit/c591da68d4a96aa28df68f4eff7641f42af82b15) feat: Add language to types ([#​18917](https://redirect.github.com/eslint/eslint/issues/18917)) (Nicholas C. 
Zakas) - [`492eb8f`](https://redirect.github.com/eslint/eslint/commit/492eb8f34ebbc5c9d1dbfcf4dd06b8dde8d1df74) feat: limit the name given to `ImportSpecifier` in `id-length` ([#​18861](https://redirect.github.com/eslint/eslint/issues/18861)) (Tanuj Kanti) - [`19c6856`](https://redirect.github.com/eslint/eslint/commit/19c685608d134d9120a129cc80c0ba7f8f016aa3) feat: Add `no-useless-constructor` suggestion ([#​18799](https://redirect.github.com/eslint/eslint/issues/18799)) (Jordan Thomson) - [`a48f8c2`](https://redirect.github.com/eslint/eslint/commit/a48f8c29b58c27d87dbf202d55a5770d678d37d6) feat: add type `FormatterFunction`, update `LoadedFormatter` ([#​18872](https://redirect.github.com/eslint/eslint/issues/18872)) (Francesco Trotta) #### Bug Fixes - [`5e5f39b`](https://redirect.github.com/eslint/eslint/commit/5e5f39b82535f59780ce4be56d01fd1466029c25) fix: add missing types for `no-restricted-exports` rule ([#​18914](https://redirect.github.com/eslint/eslint/issues/18914)) (Kristóf Poduszló) - [`8f630eb`](https://redirect.github.com/eslint/eslint/commit/8f630eb5794ef9fe38e0b8f034287650def634bd) fix: add missing types for `no-param-reassign` options ([#​18906](https://redirect.github.com/eslint/eslint/issues/18906)) (Kristóf Poduszló) - [`d715781`](https://redirect.github.com/eslint/eslint/commit/d71578124f14d6da3fa5ab5cc391bb6c9ac3ffcf) fix: add missing types for `no-extra-boolean-cast` options ([#​18902](https://redirect.github.com/eslint/eslint/issues/18902)) (Kristóf Poduszló) - [`2de5742`](https://redirect.github.com/eslint/eslint/commit/2de5742682ec45e24dca9ca7faaa45330497fca9) fix: add missing types for `no-misleading-character-class` options ([#​18905](https://redirect.github.com/eslint/eslint/issues/18905)) (Kristóf Poduszló) - [`c153084`](https://redirect.github.com/eslint/eslint/commit/c153084250673b31bed46e3fe6af7a65b4ce8d6f) fix: add missing types for `no-implicit-coercion` options ([#​18903](https://redirect.github.com/eslint/eslint/issues/18903)) 
(Kristóf Poduszló) - [`fa11b2e`](https://redirect.github.com/eslint/eslint/commit/fa11b2ede6e5dc1f55dfe4b9b65d9760828900e8) fix: add missing types for `no-empty-function` options ([#​18901](https://redirect.github.com/eslint/eslint/issues/18901)) (Kristóf Poduszló) - [`a0deed1`](https://redirect.github.com/eslint/eslint/commit/a0deed122a9676fab07b903c8d16fbf60b92eadf) fix: add missing types for `camelcase` options ([#​18897](https://redirect.github.com/eslint/eslint/issues/18897)) (Kristóf Poduszló) #### Documentation - [`e4e5709`](https://redirect.github.com/eslint/eslint/commit/e4e570952249d1c4fde59c79a0f49a38490b72c9) docs: correct `prefer-object-has-own` type definition comment ([#​18924](https://redirect.github.com/eslint/eslint/issues/18924)) (Nitin Kumar) - [`91cbd18`](https://redirect.github.com/eslint/eslint/commit/91cbd18c70dee2ef73de8d8e43f2c744fd173934) docs: add unicode abbreviations in no-irregular-whitespace rule ([#​18894](https://redirect.github.com/eslint/eslint/issues/18894)) (Alix Royere) - [`59cfc0f`](https://redirect.github.com/eslint/eslint/commit/59cfc0f1b3bbb62260602579f79bd1c36ab5a00f) docs: clarify `resultsMeta` in `LoadedFormatter` type ([#​18881](https://redirect.github.com/eslint/eslint/issues/18881)) (Milos Djermanovic) - [`adcc50d`](https://redirect.github.com/eslint/eslint/commit/adcc50dbf1fb98c0884f841e2a627796a4490373) docs: Update README (GitHub Actions Bot) - [`4edac1a`](https://redirect.github.com/eslint/eslint/commit/4edac1a325a832804f76602736a86217b40f69ac) docs: Update README (GitHub Actions Bot) #### Build Related - [`959d360`](https://redirect.github.com/eslint/eslint/commit/959d360be597d3112b10590018cd52f1d98712d6) build: Support updates to previous major versions ([#​18871](https://redirect.github.com/eslint/eslint/issues/18871)) (Milos Djermanovic) #### Chores - [`ca21a64`](https://redirect.github.com/eslint/eslint/commit/ca21a64ed0f59adb9dadcef2fc8f7248879edbd3) chore: upgrade 
[@​eslint/js](https://redirect.github.com/eslint/js)[@​9](https://redirect.github.com/9).11.0 ([#​18927](https://redirect.github.com/eslint/eslint/issues/18927)) (Milos Djermanovic) - [`a10f90a`](https://redirect.github.com/eslint/eslint/commit/a10f90af35aea9ac555b1f33106fbba1027d774e) chore: package.json update for [@​eslint/js](https://redirect.github.com/eslint/js) release (Jenkins) - [`e4e02cc`](https://redirect.github.com/eslint/eslint/commit/e4e02cc6938f38ad5028bb8ad82f52460a18dea5) refactor: Extract processor logic into ProcessorService ([#​18818](https://redirect.github.com/eslint/eslint/issues/18818)) (Nicholas C. Zakas) - [`6d4484d`](https://redirect.github.com/eslint/eslint/commit/6d4484d9c19e4132f3dee948174a543dbbb5d30f) chore: updates for v8.57.1 release (Jenkins) - [`71f37c5`](https://redirect.github.com/eslint/eslint/commit/71f37c5bf04afb704232d312cc6c72c957d1c14e) refactor: use optional chaining when validating config rules ([#​18893](https://redirect.github.com/eslint/eslint/issues/18893)) (lucasrmendonca) - [`2c2805f`](https://redirect.github.com/eslint/eslint/commit/2c2805f8ee0fb1f27f3e442de248f45e5a98a067) chore: Add PR note to all templates ([#​18892](https://redirect.github.com/eslint/eslint/issues/18892)) (Nicholas C. 
Zakas) - [`7b852ce`](https://redirect.github.com/eslint/eslint/commit/7b852ce59e6ed56931c080aa46ab548fa57feffc) refactor: use `Directive` class from `@eslint/plugin-kit` ([#​18884](https://redirect.github.com/eslint/eslint/issues/18884)) (Milos Djermanovic) - [`d594ddd`](https://redirect.github.com/eslint/eslint/commit/d594ddd2cc9b0c251291ea12fbd14ccd2ee32ac7) chore: update dependency [@​eslint/core](https://redirect.github.com/eslint/core) to ^0.6.0 ([#​18863](https://redirect.github.com/eslint/eslint/issues/18863)) (renovate\[bot]) - [`78b2421`](https://redirect.github.com/eslint/eslint/commit/78b2421e28f29206fe120ae1b03804b1b79e6324) chore: Update change.yml ([#​18882](https://redirect.github.com/eslint/eslint/issues/18882)) (Nicholas C. Zakas) - [`a416f0a`](https://redirect.github.com/eslint/eslint/commit/a416f0a270e922c86e8571e94a30fc87d72fa873) chore: enable `$ExpectType` comments in .ts files ([#​18869](https://redirect.github.com/eslint/eslint/issues/18869)) (Francesco Trotta) ### [`v9.10.0`](https://redirect.github.com/eslint/eslint/compare/v9.9.1...6448f3280f85137b429c1c320da6fb4b48169bd5) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.9.1...v9.10.0) ### [`v9.9.1`](https://redirect.github.com/eslint/eslint/compare/v9.9.0...8781e6f063e56438dc22346504ff637df3f84daf) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.9.0...v9.9.1) ### [`v9.9.0`](https://redirect.github.com/eslint/eslint/releases/tag/v9.9.0) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.8.0...v9.9.0) #### Features - [`41d0206`](https://redirect.github.com/eslint/eslint/commit/41d02066935b987d2e3b13a08680cc74d7067986) feat: Add support for TS config files ([#​18134](https://redirect.github.com/eslint/eslint/issues/18134)) (Arya Emami) - [`3a4eaf9`](https://redirect.github.com/eslint/eslint/commit/3a4eaf921543b1cd5d1df4ea9dec02fab396af2a) feat: add suggestion to `require-await` to remove `async` keyword 
([#​18716](https://redirect.github.com/eslint/eslint/issues/18716)) (Dave) #### Documentation - [`9fe068c`](https://redirect.github.com/eslint/eslint/commit/9fe068c60db466277a785434496a8f90a9090bed) docs: how to author plugins with configs that extend other configs ([#​18753](https://redirect.github.com/eslint/eslint/issues/18753)) (Alec Gibson) - [`48117b2`](https://redirect.github.com/eslint/eslint/commit/48117b27e98639ffe7e78a230bfad9a93039fb7f) docs: add version support page in the side navbar ([#​18738](https://redirect.github.com/eslint/eslint/issues/18738)) (Amaresh S M) - [`fec2951`](https://redirect.github.com/eslint/eslint/commit/fec2951d58c704c57bea7e89ffde119e4dc621e3) docs: add version support page to the dropdown ([#​18730](https://redirect.github.com/eslint/eslint/issues/18730)) (Amaresh S M) - [`38a0661`](https://redirect.github.com/eslint/eslint/commit/38a0661872dd6f1db2f53501895c58e8cf4e8064) docs: Fix typo ([#​18735](https://redirect.github.com/eslint/eslint/issues/18735)) (Zaina Al Habash) - [`3c32a9e`](https://redirect.github.com/eslint/eslint/commit/3c32a9e23c270d83bd8b2649e78aabb76992928e) docs: Update yarn command for creating ESLint config ([#​18739](https://redirect.github.com/eslint/eslint/issues/18739)) (Temitope Ogunleye) - [`f9ac978`](https://redirect.github.com/eslint/eslint/commit/f9ac978de629c9a702febcf478a743c5ab11fcf6) docs: Update README (GitHub Actions Bot) #### Chores - [`461b2c3`](https://redirect.github.com/eslint/eslint/commit/461b2c35786dc5fd5e146f370bdcafd32938386f) chore: upgrade to `@eslint/js@9.9.0` ([#​18765](https://redirect.github.com/eslint/eslint/issues/18765)) (Francesco Trotta) - [`59dba1b`](https://redirect.github.com/eslint/eslint/commit/59dba1b3404391f5d968be578f0205569d5d41b2) chore: package.json update for [@​eslint/js](https://redirect.github.com/eslint/js) release (Jenkins) - [`fea8563`](https://redirect.github.com/eslint/eslint/commit/fea8563d3372a663aa7a1a676290c34cfb8452ba) chore: update dependency 
[@​eslint/core](https://redirect.github.com/eslint/core) to ^0.3.0 ([#​18724](https://redirect.github.com/eslint/eslint/issues/18724)) (renovate\[bot]) - [`aac191e`](https://redirect.github.com/eslint/eslint/commit/aac191e6701495666c264f71fc440207ea19251f) chore: update dependency [@​eslint/json](https://redirect.github.com/eslint/json) to ^0.3.0 ([#​18760](https://redirect.github.com/eslint/eslint/issues/18760)) (renovate\[bot]) - [`b97fa05`](https://redirect.github.com/eslint/eslint/commit/b97fa051375d1a4592faf251c783691d0b0b9ab9) chore: update wdio dependencies for more stable tests ([#​18759](https://redirect.github.com/eslint/eslint/issues/18759)) (Christian Bromann) ### [`v9.8.0`](https://redirect.github.com/eslint/eslint/compare/v9.7.0...63881dc11299aba1d0960747c199a4cf48d6b9c8) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.7.0...v9.8.0) ### [`v9.7.0`](https://redirect.github.com/eslint/eslint/releases/tag/v9.7.0) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.6.0...v9.7.0) #### Features - [`7bd9839`](https://redirect.github.com/eslint/eslint/commit/7bd98398f112da020eddcda2c26cf4cc563af004) feat: add support for es2025 duplicate named capturing groups ([#​18630](https://redirect.github.com/eslint/eslint/issues/18630)) (Yosuke Ota) - [`1381394`](https://redirect.github.com/eslint/eslint/commit/1381394a75b5902ce588455765a3919e2f138a7a) feat: add `regex` option in `no-restricted-imports` ([#​18622](https://redirect.github.com/eslint/eslint/issues/18622)) (Nitin Kumar) #### Bug Fixes - [`14e9f81`](https://redirect.github.com/eslint/eslint/commit/14e9f81ccdb51d2b915b68f442d48ced0a691646) fix: destructuring in catch clause in `no-unused-vars` ([#​18636](https://redirect.github.com/eslint/eslint/issues/18636)) (Francesco Trotta) #### Documentation - [`9f416db`](https://redirect.github.com/eslint/eslint/commit/9f416db680ad01716a769296085bf3eb93f76424) docs: Add Powered by Algolia label to the search. 
([#​18633](https://redirect.github.com/eslint/eslint/issues/18633)) (Amaresh S M) - [`c8d26cb`](https://redirect.github.com/eslint/eslint/commit/c8d26cb4a2f9d89bfc1914167d3e9f1d3314ffe7) docs: Open JS Foundation -> OpenJS Foundation ([#​18649](https://redirect.github.com/eslint/eslint/issues/18649)) (Milos Djermanovic) - [`6e79ac7`](https://redirect.github.com/eslint/eslint/commit/6e79ac76f44b34c24a3e92c20713fbafe1dcbae2) docs: `loadESLint` does not support option `cwd` ([#​18641](https://redirect.github.com/eslint/eslint/issues/18641)) (Francesco Trotta) #### Chores - [`793b718`](https://redirect.github.com/eslint/eslint/commit/793b7180119e7e440d685defb2ee01597574ef1e) chore: upgrade [@​eslint/js](https://redirect.github.com/eslint/js)[@​9](https://redirect.github.com/9).7.0 ([#​18680](https://redirect.github.com/eslint/eslint/issues/18680)) (Francesco Trotta) - [`7ed6f9a`](https://redirect.github.com/eslint/eslint/commit/7ed6f9a4db702bbad941422f456451a8dba7a450) chore: package.json update for [@​eslint/js](https://redirect.github.com/eslint/js) release (Jenkins) - [`7bcda76`](https://redirect.github.com/eslint/eslint/commit/7bcda760369c44d0f1131fccaaf1ccfed5af85f1) refactor: Add type references ([#​18652](https://redirect.github.com/eslint/eslint/issues/18652)) (Nicholas C. 
Zakas) - [`51bf57c`](https://redirect.github.com/eslint/eslint/commit/51bf57c493a65baeee3a935f2d0e52e27271fb48) chore: add tech sponsors through actions ([#​18624](https://redirect.github.com/eslint/eslint/issues/18624)) (Strek) - [`6320732`](https://redirect.github.com/eslint/eslint/commit/6320732c3e2a52a220552e348108c53c60f9ef7a) refactor: don't use `parent` property in `NodeEventGenerator` ([#​18653](https://redirect.github.com/eslint/eslint/issues/18653)) (Milos Djermanovic) - [`9e6d640`](https://redirect.github.com/eslint/eslint/commit/9e6d6405c3ee774c2e716a3453ede9696ced1be7) refactor: move "Parsing error" prefix adding to Linter ([#​18650](https://redirect.github.com/eslint/eslint/issues/18650)) (Milos Djermanovic) ### [`v9.6.0`](https://redirect.github.com/eslint/eslint/compare/v9.5.0...473d1bb7c3dfcf629ac048ca811f4b5eef04a692) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.5.0...v9.6.0) ### [`v9.5.0`](https://redirect.github.com/eslint/eslint/compare/v9.4.0...535235701fb029db6f656bd4e58acdfdeb8c02de) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.4.0...v9.5.0) ### [`v9.4.0`](https://redirect.github.com/eslint/eslint/compare/v9.3.0...a5f7e589eca05a8a30bd2532380c304759cc8225) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.3.0...v9.4.0) ### [`v9.3.0`](https://redirect.github.com/eslint/eslint/compare/v9.2.0...41a871cf43874e2f27ad08554c7900daf0e94b06) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.2.0...v9.3.0) ### [`v9.2.0`](https://redirect.github.com/eslint/eslint/compare/v9.1.1...271e7ab1adc45a7b2f66cfea55a54e6048d9749a) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.1.1...v9.2.0) ### [`v9.1.1`](https://redirect.github.com/eslint/eslint/compare/v9.1.0...b4d2512809a1b28466ad1ce5af9d01c181b9bf9e) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.1.0...v9.1.1) ### 
[`v9.1.0`](https://redirect.github.com/eslint/eslint/compare/v9.0.0...b78d831e244171c939279b03be519b5c13836fce) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v9.0.0...v9.1.0) ### [`v9.0.0`](https://redirect.github.com/eslint/eslint/compare/v8.57.0...e0cbc50179adac1670f4e0bd9093387a51f4f42a) [Compare Source](https://redirect.github.com/eslint/eslint/compare/v8.57.1...v9.0.0)
    --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). --- search/grounded-generation-playground/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/search/grounded-generation-playground/package.json b/search/grounded-generation-playground/package.json index 8599da9309..109ff4fb44 100644 --- a/search/grounded-generation-playground/package.json +++ b/search/grounded-generation-playground/package.json @@ -52,7 +52,7 @@ "@types/react-dom": "^18", "@typescript-eslint/eslint-plugin": "^8.5.0", "@typescript-eslint/parser": "^8.5.0", - "eslint": "^8.0.0", + "eslint": "^9.0.0", "eslint-config-next": "^14.2.14", "eslint-config-prettier": "^9.1.0", "eslint-plugin-prettier": "^5.2.1", From a05bf179cae0b59515e00a3ce108273444e9b3c3 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 14 Oct 2024 16:51:25 +0200 Subject: [PATCH 75/76] chore(deps): update dependency @types/node to v22 (#1251) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [@types/node](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/node) ([source](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node)) | [`^20.16.5` -> `^22.0.0`](https://renovatebot.com/diffs/npm/@types%2fnode/20.16.5/22.7.5) | 
[![age](https://developer.mend.io/api/mc/badges/age/npm/@types%2fnode/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@types%2fnode/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@types%2fnode/20.16.5/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@types%2fnode/20.16.5/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). 
Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com> --- search/grounded-generation-playground/package-lock.json | 8 ++++---- search/grounded-generation-playground/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/search/grounded-generation-playground/package-lock.json b/search/grounded-generation-playground/package-lock.json index b00752b29f..fc680b0c0e 100644 --- a/search/grounded-generation-playground/package-lock.json +++ b/search/grounded-generation-playground/package-lock.json @@ -40,7 +40,7 @@ "@tailwindcss/typography": "^0.5.15", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/node": "^20.16.5", + "@types/node": "^22.0.0", "@types/node-fetch": "^2.6.11", "@types/react": "^18", "@types/react-dom": "^18", @@ -3062,9 +3062,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "20.16.5", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.16.5.tgz", - "integrity": "sha512-VwYCweNo3ERajwy0IUlqqcyZ8/A7Zwa9ZP3MnENWcB11AejO+tLy3pu850goUW2FC/IJMdZUfKpX/yxL1gymCA==", + "version": "22.7.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.7.5.tgz", + "integrity": "sha512-jML7s2NAzMWc//QSJ1a3prpk78cOPchGvXJsC3C6R6PSMoooztvRVQEz89gmBTBY1SPMaqo5teB4uNHPdetShQ==", "dev": true, "license": "MIT", "dependencies": { diff --git a/search/grounded-generation-playground/package.json b/search/grounded-generation-playground/package.json index 109ff4fb44..f2a10a26a2 100644 --- a/search/grounded-generation-playground/package.json +++ b/search/grounded-generation-playground/package.json @@ -46,7 +46,7 @@ "@tailwindcss/typography": "^0.5.15", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/node": "^20.16.5", + "@types/node": "^22.0.0", "@types/node-fetch": "^2.6.11", "@types/react": "^18", "@types/react-dom": "^18", From e62141b775ec6f79f4a353b72473dea66bd39869 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 
14 Oct 2024 16:52:38 +0200 Subject: [PATCH 76/76] chore(deps): update dependency @types/node to v22 (#1257) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [@types/node](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/node) ([source](https://redirect.github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node)) | [`^20.16.5` -> `^22.0.0`](https://renovatebot.com/diffs/npm/@types%2fnode/20.16.5/22.7.5) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@types%2fnode/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@types%2fnode/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@types%2fnode/20.16.5/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@types%2fnode/20.16.5/22.7.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the warning logs for more information. --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/GoogleCloudPlatform/generative-ai). 
Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com>

    + + + {/* Main content */} +
    +
    +
    +
    +

    + Vertex AI Search Grounded Generation Playground +

    +
    +
    +
    +
    +
    +
    + + + + Chat + + + Comparison + + + About + + + + {/* Chat Interface */} + + {messages.map((message, index) => ( +
    +
    +
    + +
    + + + {message.role === 'user' ? ( + + {message.content} + + ) : ( + + )} + + + {message.role === 'user' && ( +
    + +
    + )} +
    +
    + ))} + {messages.length === 0 ? ( + + ) : ( + '' + )} + {isStreaming && ( +
    +
    + + + Thinking... +
    +
    + )} + +
    + +
    +
    + setInputMessage(e.target.value)} + className="flex-1 h-10 bg-zinc-800 text-white border-zinc-500 focus:border-blue-500 focus:ring-2 focus:ring-blue-500" + /> + + +
    +
    + + + {/* Comparison Interface */} +
    + {/* Greeting and Example Queries */} + + {/* Search bar */} +
    +
    + setInputMessage(e.target.value)} + className="flex-1 h-10 bg-zinc-800 text-white border-zinc-500 focus:border-blue-500 focus:ring-2 focus:ring-blue-500" + /> + +
    +
    + + {/* Response containers */} + {showResponses && ( +
    +
    +

    + Grounded Response +

    +
    + {responses.grounded.text ? ( + + ) : ( +
    + + + Thinking... +
    + )} +
    +
    +
    +

    + Ungrounded Response +

    +
    + {responses.ungrounded.text ? ( + + {responses.ungrounded.text} + + ) : ( +
    + + + Thinking... +
    + )} +
    +
    +
    + )} +
    +
    + + {/* About Interface */} +
    +
    +
    + +
    +
    + + +
    +
    +
    +
    +