From a05eff34a9f13c39259a7626ff7b0b166c4cf373 Mon Sep 17 00:00:00 2001
From: pablodanswer
Date: Wed, 18 Dec 2024 14:25:48 -0800
Subject: [PATCH 1/5] ensure password validation errors propagate

---
 backend/onyx/auth/users.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/backend/onyx/auth/users.py b/backend/onyx/auth/users.py
index 69f78f50ae3..f7d029e9553 100644
--- a/backend/onyx/auth/users.py
+++ b/backend/onyx/auth/users.py
@@ -228,6 +228,8 @@ async def create(
         safe: bool = False,
         request: Optional[Request] = None,
     ) -> User:
+        await self.validate_password(user_create.password, user_create)
+
         user_count: int | None = None
         referral_source = (
             request.cookies.get("referral_source", None)

From 493693625493fac4eac18b6957431eb6ffae3011 Mon Sep 17 00:00:00 2001
From: pablodanswer
Date: Wed, 18 Dec 2024 14:27:04 -0800
Subject: [PATCH 2/5] copy update

---
 backend/onyx/auth/users.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/backend/onyx/auth/users.py b/backend/onyx/auth/users.py
index f7d029e9553..2b1a489c9ad 100644
--- a/backend/onyx/auth/users.py
+++ b/backend/onyx/auth/users.py
@@ -228,6 +228,7 @@ async def create(
         safe: bool = False,
         request: Optional[Request] = None,
     ) -> User:
+        # We verify the password here to make sure it's valid before we proceed
         await self.validate_password(user_create.password, user_create)
 
         user_count: int | None = None
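Note on patches 1 and 2: they work because the `validate_password` hook raises on a policy failure, and calling it at the top of `create` lets that exception propagate to the API layer before any user row is written. A minimal standalone sketch of the pattern — `SimpleUserManager`, `UserCreateStub`, and the 12-character rule are hypothetical stand-ins for illustration, not Onyx's actual classes or policy:

import asyncio
from dataclasses import dataclass


class InvalidPasswordException(Exception):
    """Stand-in for the exception type the auth framework turns into a 400."""

    def __init__(self, reason: str) -> None:
        super().__init__(reason)
        self.reason = reason


@dataclass
class UserCreateStub:
    email: str
    password: str


class SimpleUserManager:
    async def validate_password(self, password: str, user: UserCreateStub) -> None:
        # Hypothetical policy; raising here aborts create() before any DB work.
        if len(password) < 12:
            raise InvalidPasswordException(reason="password must be >= 12 characters")

    async def create(self, user_create: UserCreateStub) -> UserCreateStub:
        # Validate first so a bad password surfaces as an API error
        # instead of failing after a partially completed signup.
        await self.validate_password(user_create.password, user_create)
        return user_create  # real code would insert the user here


async def main() -> None:
    manager = SimpleUserManager()
    try:
        await manager.create(UserCreateStub(email="a@b.c", password="short"))
    except InvalidPasswordException as e:
        print(f"rejected: {e.reason}")


asyncio.run(main())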
From 9b7aa6008c773d987705cf5509eafba37b567319 Mon Sep 17 00:00:00 2001
From: pablodanswer
Date: Wed, 18 Dec 2024 15:23:32 -0800
Subject: [PATCH 3/5] support o1

---
 backend/onyx/llm/chat_llm.py             | 4 +++-
 backend/onyx/llm/llm_provider_options.py | 1 +
 backend/requirements/default.txt         | 5 +++--
 backend/requirements/model_server.txt    | 2 +-
 web/src/lib/hooks.ts                     | 1 +
 5 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/backend/onyx/llm/chat_llm.py b/backend/onyx/llm/chat_llm.py
index 32f8684b4e7..66e65415b18 100644
--- a/backend/onyx/llm/chat_llm.py
+++ b/backend/onyx/llm/chat_llm.py
@@ -453,7 +453,9 @@ def _stream_implementation(
         if LOG_DANSWER_MODEL_INTERACTIONS:
             self.log_model_configs()
 
-        if DISABLE_LITELLM_STREAMING:
+        if (
+            DISABLE_LITELLM_STREAMING or self.config.model_name == "o1-2024-12-17"
+        ):  # TODO: remove once litellm supports streaming
             yield self.invoke(prompt, tools, tool_choice, structured_response_format)
             return
 
diff --git a/backend/onyx/llm/llm_provider_options.py b/backend/onyx/llm/llm_provider_options.py
index cf562ee5a27..d42ff3a5e2d 100644
--- a/backend/onyx/llm/llm_provider_options.py
+++ b/backend/onyx/llm/llm_provider_options.py
@@ -29,6 +29,7 @@ class WellKnownLLMProviderDescriptor(BaseModel):
 OPEN_AI_MODEL_NAMES = [
     "o1-mini",
     "o1-preview",
+    "o1-2024-12-17",
     "gpt-4",
     "gpt-4o",
     "gpt-4o-mini",
diff --git a/backend/requirements/default.txt b/backend/requirements/default.txt
index 3a4996d9014..9e01349544e 100644
--- a/backend/requirements/default.txt
+++ b/backend/requirements/default.txt
@@ -29,7 +29,7 @@ trafilatura==1.12.2
 langchain==0.1.17
 langchain-core==0.1.50
 langchain-text-splitters==0.0.1
-litellm==1.54.1
+litellm==1.55.4
 lxml==5.3.0
 lxml_html_clean==0.2.2
 llama-index==0.9.45
@@ -81,4 +81,5 @@ stripe==10.12.0
 urllib3==2.2.3
 mistune==0.8.4
 sentry-sdk==2.14.0
-prometheus_client==0.21.0
\ No newline at end of file
+prometheus_client==0.21.0
+markitdown==0.0.1a3
\ No newline at end of file
diff --git a/backend/requirements/model_server.txt b/backend/requirements/model_server.txt
index 531382cb4b1..ac97e64aebe 100644
--- a/backend/requirements/model_server.txt
+++ b/backend/requirements/model_server.txt
@@ -12,5 +12,5 @@ torch==2.2.0
 transformers==4.39.2
 uvicorn==0.21.1
 voyageai==0.2.3
-litellm==1.54.1
+litellm==1.55.4
 sentry-sdk[fastapi,celery,starlette]==2.14.0
\ No newline at end of file
diff --git a/web/src/lib/hooks.ts b/web/src/lib/hooks.ts
index 6c7592ea1b6..81e83312264 100644
--- a/web/src/lib/hooks.ts
+++ b/web/src/lib/hooks.ts
@@ -299,6 +299,7 @@ const MODEL_DISPLAY_NAMES: { [key: string]: string } = {
   // OpenAI models
   "o1-mini": "O1 Mini",
   "o1-preview": "O1 Preview",
+  "o1-2024-12-17": "O1",
   "gpt-4": "GPT 4",
   "gpt-4o": "GPT 4o",
   "gpt-4o-2024-08-06": "GPT 4o (Structured Outputs)",

From 91f574f10760064663a9f18fe49c07aa98cd5c3b Mon Sep 17 00:00:00 2001
From: pablodanswer
Date: Wed, 18 Dec 2024 15:30:12 -0800
Subject: [PATCH 4/5] improve typing

---
 backend/onyx/auth/users.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/backend/onyx/auth/users.py b/backend/onyx/auth/users.py
index 2b1a489c9ad..69025aab198 100644
--- a/backend/onyx/auth/users.py
+++ b/backend/onyx/auth/users.py
@@ -5,6 +5,7 @@
 from datetime import timezone
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
+from typing import cast
 from typing import Dict
 from typing import List
 from typing import Optional
@@ -229,7 +230,9 @@ async def create(
         request: Optional[Request] = None,
     ) -> User:
         # We verify the password here to make sure it's valid before we proceed
-        await self.validate_password(user_create.password, user_create)
+        await self.validate_password(
+            user_create.password, cast(schemas.UC, user_create)
+        )
 
         user_count: int | None = None
         referral_source = (
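Note on patch 4: the `cast(schemas.UC, user_create)` exists purely for the type checker. `typing.cast` performs no conversion or validation at runtime; it simply returns its second argument unchanged. A quick illustration:

from typing import cast

value: object = "hello"
text = cast(str, value)  # no runtime check: cast() returns `value` as-is
assert text is value
print(text.upper())  # prints "HELLO"; a wrong cast would fail here, not at cast()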
From dc093679ab98104e6185894898000f96a10a3e66 Mon Sep 17 00:00:00 2001
From: pablodanswer
Date: Wed, 18 Dec 2024 15:30:31 -0800
Subject: [PATCH 5/5] Revert "support o1"

This reverts commit 9b7aa6008c773d987705cf5509eafba37b567319.
---
 backend/onyx/llm/chat_llm.py             | 4 +---
 backend/onyx/llm/llm_provider_options.py | 1 -
 backend/requirements/default.txt         | 5 ++---
 backend/requirements/model_server.txt    | 2 +-
 web/src/lib/hooks.ts                     | 1 -
 5 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/backend/onyx/llm/chat_llm.py b/backend/onyx/llm/chat_llm.py
index 66e65415b18..32f8684b4e7 100644
--- a/backend/onyx/llm/chat_llm.py
+++ b/backend/onyx/llm/chat_llm.py
@@ -453,9 +453,7 @@ def _stream_implementation(
         if LOG_DANSWER_MODEL_INTERACTIONS:
             self.log_model_configs()
 
-        if (
-            DISABLE_LITELLM_STREAMING or self.config.model_name == "o1-2024-12-17"
-        ):  # TODO: remove once litellm supports streaming
+        if DISABLE_LITELLM_STREAMING:
             yield self.invoke(prompt, tools, tool_choice, structured_response_format)
             return
 
diff --git a/backend/onyx/llm/llm_provider_options.py b/backend/onyx/llm/llm_provider_options.py
index d42ff3a5e2d..cf562ee5a27 100644
--- a/backend/onyx/llm/llm_provider_options.py
+++ b/backend/onyx/llm/llm_provider_options.py
@@ -29,7 +29,6 @@ class WellKnownLLMProviderDescriptor(BaseModel):
 OPEN_AI_MODEL_NAMES = [
     "o1-mini",
     "o1-preview",
-    "o1-2024-12-17",
     "gpt-4",
     "gpt-4o",
     "gpt-4o-mini",
diff --git a/backend/requirements/default.txt b/backend/requirements/default.txt
index 9e01349544e..3a4996d9014 100644
--- a/backend/requirements/default.txt
+++ b/backend/requirements/default.txt
@@ -29,7 +29,7 @@ trafilatura==1.12.2
 langchain==0.1.17
 langchain-core==0.1.50
 langchain-text-splitters==0.0.1
-litellm==1.55.4
+litellm==1.54.1
 lxml==5.3.0
 lxml_html_clean==0.2.2
 llama-index==0.9.45
@@ -81,5 +81,4 @@ stripe==10.12.0
 urllib3==2.2.3
 mistune==0.8.4
 sentry-sdk==2.14.0
-prometheus_client==0.21.0
-markitdown==0.0.1a3
\ No newline at end of file
+prometheus_client==0.21.0
\ No newline at end of file
diff --git a/backend/requirements/model_server.txt b/backend/requirements/model_server.txt
index ac97e64aebe..531382cb4b1 100644
--- a/backend/requirements/model_server.txt
+++ b/backend/requirements/model_server.txt
@@ -12,5 +12,5 @@ torch==2.2.0
 transformers==4.39.2
 uvicorn==0.21.1
 voyageai==0.2.3
-litellm==1.55.4
+litellm==1.54.1
 sentry-sdk[fastapi,celery,starlette]==2.14.0
\ No newline at end of file
diff --git a/web/src/lib/hooks.ts b/web/src/lib/hooks.ts
index 81e83312264..6c7592ea1b6 100644
--- a/web/src/lib/hooks.ts
+++ b/web/src/lib/hooks.ts
@@ -299,7 +299,6 @@ const MODEL_DISPLAY_NAMES: { [key: string]: string } = {
   // OpenAI models
   "o1-mini": "O1 Mini",
   "o1-preview": "O1 Preview",
-  "o1-2024-12-17": "O1",
   "gpt-4": "GPT 4",
   "gpt-4o": "GPT 4o",
   "gpt-4o-2024-08-06": "GPT 4o (Structured Outputs)",
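Note on patch 5: the guard reverted here (added in patch 3) follows a common fallback shape for models that reject streamed requests: detect the model, make one blocking call, and yield the full response as a single chunk so callers can keep iterating unchanged. A minimal sketch — `NON_STREAMING_MODELS` and the `invoke_*` helpers are hypothetical names for illustration, not Onyx's API:

from collections.abc import Iterator

NON_STREAMING_MODELS = {"o1-2024-12-17"}  # hypothetical registry


def invoke_blocking(model: str, prompt: str) -> str:
    return f"[{model}] complete answer to {prompt!r}"


def invoke_streaming(model: str, prompt: str) -> Iterator[str]:
    yield from f"[{model}] streamed answer to {prompt!r}".split()


def stream_response(model: str, prompt: str) -> Iterator[str]:
    if model in NON_STREAMING_MODELS:
        # One blocking call, surfaced as a single chunk; the caller's
        # `for chunk in ...` loop works identically either way.
        yield invoke_blocking(model, prompt)
        return
    yield from invoke_streaming(model, prompt)


for chunk in stream_response("o1-2024-12-17", "hi"):
    print(chunk)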