diff --git a/ai_ref_kits/agentic_llm_rag/requirements.txt b/ai_ref_kits/agentic_llm_rag/requirements.txt index 48188cd4..d2ef427e 100644 --- a/ai_ref_kits/agentic_llm_rag/requirements.txt +++ b/ai_ref_kits/agentic_llm_rag/requirements.txt @@ -13,7 +13,8 @@ llama-index-vector-stores-faiss==0.3.0 faiss-cpu==1.9.0 # onnx>1.16.1 doesn't work on windows -onnx==1.16.1 +onnx==1.16.1; platform_system == "Windows" +onnx==1.17.0; platform_system != "Windows" onnxruntime==1.17.3 torch==2.5.1 diff --git a/ai_ref_kits/automated_self_checkout/requirements.txt b/ai_ref_kits/automated_self_checkout/requirements.txt index 61581f76..6a49b5f7 100644 --- a/ai_ref_kits/automated_self_checkout/requirements.txt +++ b/ai_ref_kits/automated_self_checkout/requirements.txt @@ -1,7 +1,9 @@ openvino-dev==2023.0.0 nncf==2.5.0 ultralytics==8.0.117 -onnx==1.16.0 +# onnx>1.16.1 doesn't work on windows +onnx==1.16.1; platform_system == "Windows" +onnx==1.17.0; platform_system != "Windows" supervision==0.16.0 jupyterlab==4.2.5 pycocotools==2.0.6 \ No newline at end of file diff --git a/ai_ref_kits/conversational_ai_chatbot/README.md b/ai_ref_kits/conversational_ai_chatbot/README.md index 43822aa7..af548371 100644 --- a/ai_ref_kits/conversational_ai_chatbot/README.md +++ b/ai_ref_kits/conversational_ai_chatbot/README.md @@ -189,10 +189,10 @@ For the python script, you must include the following model directory arguments. - `--reranker_model path/to/reranker_model`: The path to your reranker model directory (for example, `model/bge-reranker-large-FP32`). This model reranks responses to ensure relevance and accuracy. -- `--personality path/to/personality.yaml`: The path to your custom personality YAML file (for example, `concierge_personality.yaml`). +- `--personality path/to/personality.yaml`: The path to your custom personality YAML file (for example, `config/concierge_personality.yaml`). 
This file defines the assistant's personality, including instructions, system configuration, and greeting prompts. You can create and specify your own custom personality file. -- `--example_pdf path/to/personality.yaml`: The path to your custom PDF file which is an additional context (for example, `Grand_Azure_Resort_Spa_Full_Guide.pdf`). +- `--example_pdf path/to/example.pdf`: The path to your custom PDF file which is an additional context (for example, `data/Grand_Azure_Resort_Spa_Full_Guide.pdf`). This file defines the knowledge of the resort in this concierge use case. You can use your own custom file to build a local knowledge base. - `--public`: Include this flag to make the Gradio interface publicly accessible over the network. Without this flag, the interface is only available on your local machine. @@ -204,8 +204,8 @@ python app.py \ --chat_model path/to/chat_model \ --embedding_model path/to/embedding_model \ --reranker_model path/to/reranker_model \ - --personality concierge_personality.yaml \ - --example_pdf Grand_Azure_Resort_Spa_Full_Guide.pdf \ + --personality config/concierge_personality.yaml \ + --example_pdf data/Grand_Azure_Resort_Spa_Full_Guide.pdf \ --public ``` diff --git a/ai_ref_kits/conversational_ai_chatbot/app.py b/ai_ref_kits/conversational_ai_chatbot/app.py index 43b96cf6..bf7bef96 100644 --- a/ai_ref_kits/conversational_ai_chatbot/app.py +++ b/ai_ref_kits/conversational_ai_chatbot/app.py @@ -465,8 +465,8 @@ def run(asr_model_dir: Path, chat_model_dir: Path, embedding_model_dir: Path, re parser.add_argument("--chat_model", type=str, default="model/llama3.1-8B-INT4", help="Path to the chat model directory") parser.add_argument("--embedding_model", type=str, default="model/bge-small-FP32", help="Path to the embedding model directory") parser.add_argument("--reranker_model", type=str, default="model/bge-reranker-large-FP32", help="Path to the reranker model directory") - parser.add_argument("--personality", type=str, 
default="concierge_personality.yaml", help="Path to the YAML file with chatbot personality") - parser.add_argument("--example_pdf", type=str, default="Grand_Azure_Resort_Spa_Full_Guide.pdf", help="Path to the PDF file which is an additional context") + parser.add_argument("--personality", type=str, default="config/concierge_personality.yaml", help="Path to the YAML file with chatbot personality") + parser.add_argument("--example_pdf", type=str, default="data/Grand_Azure_Resort_Spa_Full_Guide.pdf", help="Path to the PDF file which is an additional context") parser.add_argument("--public", default=False, action="store_true", help="Whether interface should be available publicly") args = parser.parse_args() diff --git a/ai_ref_kits/conversational_ai_chatbot/concierge_personality.yaml b/ai_ref_kits/conversational_ai_chatbot/config/concierge_personality.yaml similarity index 100% rename from ai_ref_kits/conversational_ai_chatbot/concierge_personality.yaml rename to ai_ref_kits/conversational_ai_chatbot/config/concierge_personality.yaml diff --git a/ai_ref_kits/conversational_ai_chatbot/Grand_Azure_Resort_Spa_Full_Guide.pdf b/ai_ref_kits/conversational_ai_chatbot/data/Grand_Azure_Resort_Spa_Full_Guide.pdf similarity index 100% rename from ai_ref_kits/conversational_ai_chatbot/Grand_Azure_Resort_Spa_Full_Guide.pdf rename to ai_ref_kits/conversational_ai_chatbot/data/Grand_Azure_Resort_Spa_Full_Guide.pdf diff --git a/ai_ref_kits/conversational_ai_chatbot/requirements.txt b/ai_ref_kits/conversational_ai_chatbot/requirements.txt index 2d27e386..c2bbf1ea 100644 --- a/ai_ref_kits/conversational_ai_chatbot/requirements.txt +++ b/ai_ref_kits/conversational_ai_chatbot/requirements.txt @@ -14,7 +14,8 @@ langchain-text-splitters==0.3.4 faiss-cpu==1.9.0 # onnx>1.16.1 doesn't work on windows -onnx==1.16.1 +onnx==1.16.1; platform_system == "Windows" +onnx==1.17.0; platform_system != "Windows" onnxruntime==1.17.3 torch==2.5.1 torchaudio==2.5.1 diff --git 
a/ai_ref_kits/multimodal_ai_visual_generator/README.md b/ai_ref_kits/multimodal_ai_visual_generator/README.md index 0aed1134..36443c34 100644 --- a/ai_ref_kits/multimodal_ai_visual_generator/README.md +++ b/ai_ref_kits/multimodal_ai_visual_generator/README.md @@ -84,7 +84,7 @@ Next, you’ll download and optimize the required models. This will involve the ```shell python3 -m venv model_installation_venv source model_installation_venv/bin/activate -pip install -r python3.12_requirements_model_installation.txt +pip install -r requirements.txt python3 download_and_prepare_models.py ``` After model installation, you can remove the `model_installation_venv` virtual environment as it is no longer needed. diff --git a/ai_ref_kits/multimodal_ai_visual_generator/python3.12_requirements_model_installation.txt b/ai_ref_kits/multimodal_ai_visual_generator/python3.12_requirements_model_installation.txt deleted file mode 100644 index 8795d107..00000000 --- a/ai_ref_kits/multimodal_ai_visual_generator/python3.12_requirements_model_installation.txt +++ /dev/null @@ -1,100 +0,0 @@ -about-time==4.2.1 -aiohttp==3.10.11 -aiosignal==1.3.1 -alive-progress==3.1.5 -attrs==23.2.0 -autograd==1.6.2 -certifi==2024.7.4 -charset-normalizer==3.3.2 -cma==3.2.2 -colorama==0.4.6 -coloredlogs==15.0.1 -contourpy==1.2.1 -cycler==0.12.1 -datasets==2.20.0 -defusedxml==0.7.1 -Deprecated==1.2.14 -diffusers==0.29.0 -dill==0.3.8 -filelock==3.15.3 -fonttools==4.53.0 -frozenlist==1.4.1 -fsspec==2024.5.0 -future==1.0.0 -grapheme==0.6.0 -huggingface-hub==0.23.4 -humanfriendly==10.0 -idna==3.7 -importlib_metadata==7.1.0 -intel-openmp==2021.4.0 -Jinja2==3.1.5 -joblib==1.4.2 -jsonschema==4.22.0 -jsonschema-specifications==2023.12.1 -jstyleson==0.0.2 -kiwisolver==1.4.5 -llvmlite==0.43.0 -markdown-it-py==3.0.0 -MarkupSafe==2.1.5 -matplotlib==3.9.0 -mdurl==0.1.2 -mkl==2021.4.0 -more-itertools==10.3.0 -mpmath==1.3.0 -multidict==6.0.5 -multiprocess==0.70.16 -natsort==8.4.0 -networkx==3.1 -ninja==1.11.1.1 -nncf==2.10.0 
-numba==0.60.0 -numpy==1.26.4 -onnx==1.16.0 -openai-whisper==20231117 -openvino==2024.2.0 -openvino-dev==2024.2.0 -openvino-telemetry==2024.1.0 -optimum==1.20.0 -optimum-intel==1.17.0 -packaging==24.1 -pandas==2.2.2 -pillow==10.3.0 -protobuf==5.27.1 -psutil==6.0.0 -pyarrow==16.1.0 -pyarrow-hotfix==0.6 -pydot==2.0.0 -Pygments==2.18.0 -pymoo==0.6.1.1 -pyparsing==3.1.2 -pyreadline3==3.4.1 -python-dateutil==2.9.0.post0 -pytz==2024.1 -PyYAML==6.0.1 -referencing==0.35.1 -regex==2024.5.15 -requests==2.32.3 -rich==13.7.1 -rpds-py==0.18.1 -safetensors==0.4.3 -scikit-learn==1.5.0 -scipy==1.13.1 -sentencepiece==0.2.0 -setuptools==70.1.0 -six==1.16.0 -sympy==1.12.1 -tabulate==0.9.0 -tbb==2021.12.0 -threadpoolctl==3.5.0 -tiktoken==0.7.0 -tokenizers==0.19.1 -torch==2.3.1+cpu -tqdm==4.66.4 -transformers==4.41.2 -typing_extensions==4.12.2 -tzdata==2024.1 -urllib3==2.2.2 -wrapt==1.16.0 -xxhash==3.4.1 -yarl==1.9.4 -zipp==3.19.2 \ No newline at end of file diff --git a/ai_ref_kits/multimodal_ai_visual_generator/python3.12_requirements.txt b/ai_ref_kits/multimodal_ai_visual_generator/requirements.txt similarity index 85% rename from ai_ref_kits/multimodal_ai_visual_generator/python3.12_requirements.txt rename to ai_ref_kits/multimodal_ai_visual_generator/requirements.txt index f40b844f..571e4504 100644 --- a/ai_ref_kits/multimodal_ai_visual_generator/python3.12_requirements.txt +++ b/ai_ref_kits/multimodal_ai_visual_generator/requirements.txt @@ -11,7 +11,9 @@ openai-whisper==20240930 # deep learning frameworks tensorflow==2.17.0 -onnx==1.16.1 +# onnx>1.16.1 doesn't work on windows +onnx==1.16.1; platform_system == "Windows" +onnx==1.17.0; platform_system != "Windows" --extra-index-url https://download.pytorch.org/whl/cpu torch==2.4.1 torchvision==0.19.1 diff --git a/notebooks/onnxruntime_lcm/requirements.txt b/notebooks/onnxruntime_lcm/requirements.txt index f7f4e9b8..5052d761 100644 --- a/notebooks/onnxruntime_lcm/requirements.txt +++ b/notebooks/onnxruntime_lcm/requirements.txt @@ 
-4,7 +4,9 @@ jupyterlab==4.2.5 ipywidgets==8.1.5 openvino==2024.4.0 -onnx==1.16.1 +# onnx>1.16.1 doesn't work on windows +onnx==1.16.1; platform_system == "Windows" +onnx==1.17.0; platform_system != "Windows" onnxruntime-openvino==1.20.0 optimum==1.22.0 accelerate==0.33.0 diff --git a/notebooks/onnxruntime_yolov8/requirements.txt b/notebooks/onnxruntime_yolov8/requirements.txt index 2b3086ef..6d6ba81d 100644 --- a/notebooks/onnxruntime_yolov8/requirements.txt +++ b/notebooks/onnxruntime_yolov8/requirements.txt @@ -5,5 +5,7 @@ jupyterlab==4.2.5 openvino==2024.5.0 ultralytics==8.3.38 onnxruntime-openvino==1.20.0 -onnx==1.16.1 +# onnx>1.16.1 doesn't work on windows +onnx==1.16.1; platform_system == "Windows" +onnx==1.17.0; platform_system != "Windows" setuptools==73.0.1