From fe3ef9647a61cc0c51f5367ac5bcb08d5504b600 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:51:38 +0300 Subject: [PATCH 01/46] Create unify.py --- semantic_router/llms/unify.py | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 semantic_router/llms/unify.py diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py new file mode 100644 index 00000000..443e78f9 --- /dev/null +++ b/semantic_router/llms/unify.py @@ -0,0 +1,46 @@ +import os +from typing import Dict, List, Optional + +import unify + +from semantic_router.llms import BaseLLM +from semantic_router.schema import Message + +from unify.utils import _validate_api_key +from unify.exceptions import UnifyError +from unify.clients import Unify, AsyncUnify + +class UnifyLLM(BaseLLM): + + client: Optional[Unify] = None + + def __init__( + self, + name: Optional[str] = None, + unify_api_key: Optional[str] = None, + ): + + if name is None: + name = os.environ.get("UNIFY_END_POINT", "llama-3-8b-chat@together-ai") + + super().__init__(name=name) + + try: + self.client = Unify(name, api_key=unify_api_key) + except Exception as e: + raise ValueError( + f"Unify API client failed to initialize. Error: {e}" + ) from e + + def __call__(self, messages: List[Message]) -> str: + if self.client is None: + raise ValueError("Unify client is not initialized.") + try: + output = self.client.generate(messages=[m.to_openai() for m in messages]) + + if not output: + raise Exception("No output generated") + return output + + except Exception as e: + raise UnifyError(f"Unify API call failed. Error: {e}") from e From efb310f141f5fb1b864245ef87543f52473532d6 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:52:38 +0300 Subject: [PATCH 02/46] Update __init__.py --- semantic_router/llms/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/semantic_router/llms/__init__.py b/semantic_router/llms/__init__.py index 36f13c8d..6f389de8 100644 --- a/semantic_router/llms/__init__.py +++ b/semantic_router/llms/__init__.py @@ -4,6 +4,7 @@ from semantic_router.llms.mistral import MistralAILLM from semantic_router.llms.openai import OpenAILLM from semantic_router.llms.openrouter import OpenRouterLLM +from semantic_router.llms.unify import UnifyLLM from semantic_router.llms.zure import AzureOpenAILLM __all__ = [ @@ -14,4 +15,5 @@ "CohereLLM", "AzureOpenAILLM", "MistralAILLM", + "UnifyLLM", ] From babef94072828207cdb97575b0f60c68c93a1adb Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:54:59 +0300 Subject: [PATCH 03/46] Update defaults.py --- semantic_router/utils/defaults.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/semantic_router/utils/defaults.py b/semantic_router/utils/defaults.py index 151a9935..17ad950a 100644 --- a/semantic_router/utils/defaults.py +++ b/semantic_router/utils/defaults.py @@ -34,5 +34,10 @@ class EncoderDefault(Enum): BEDROCK = { "embedding_model": os.environ.get( "BEDROCK_EMBEDDING_MODEL", "amazon.titan-embed-image-v1" + ), + } + UNIFy = { + "end_point": os.environ.get("UNIFY_END_POINT", "llama-3-8b-chat@together-ai" ) } + From d91bc3b2d95e129ca731e22ed00302c26c636e18 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:56:08 +0300 Subject: [PATCH 04/46] Update unify.py --- 
semantic_router/llms/unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 443e78f9..3b16bd04 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -21,7 +21,7 @@ def __init__( ): if name is None: - name = os.environ.get("UNIFY_END_POINT", "llama-3-8b-chat@together-ai") + name = EncoderDefault.UNIFy.value["end_point"] super().__init__(name=name) From 918c4f966739c394e607a0c42f33f831594ac9e1 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 21 Aug 2024 19:48:21 +0300 Subject: [PATCH 05/46] Update unify.py --- semantic_router/llms/unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 3b16bd04..2628d0d6 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -6,7 +6,7 @@ from semantic_router.llms import BaseLLM from semantic_router.schema import Message -from unify.utils import _validate_api_key +# from unify.utils import _validate_api_key from unify.exceptions import UnifyError from unify.clients import Unify, AsyncUnify From 812dd77b4b66b6f03fd1067deaa53a6bd6bf94ed Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 21 Aug 2024 20:37:39 +0300 Subject: [PATCH 06/46] Update unify.py --- semantic_router/llms/unify.py | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 2628d0d6..14631d15 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -25,22 +25,17 @@ def __init__( super().__init__(name=name) - try: - self.client = Unify(name, api_key=unify_api_key) - except Exception as e: - raise ValueError( - f"Unify API client failed to initialize. Error: {e}" - ) from e + self.client = Unify(name, api_key=unify_api_key) - def __call__(self, messages: List[Message]) -> str: - if self.client is None: - raise ValueError("Unify client is not initialized.") - try: - output = self.client.generate(messages=[m.to_openai() for m in messages]) + def __call__(self, messages: List[Message]) -> str: + if self.client is None: + raise UnifyError("Unify client is not initialized.") + try: + output = self.client.generate(messages=[m.to_openai() for m in messages]) - if not output: - raise Exception("No output generated") - return output + if not output: + raise Exception("No output generated") + return output - except Exception as e: - raise UnifyError(f"Unify API call failed. Error: {e}") from e + except Exception as e: + raise UnifyError(f"Unify API call failed. 
Error: {e}") from e From 086407722fbf667db107192b52d53512c7815bba Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 12:30:10 +0200 Subject: [PATCH 07/46] updates to EncodeDefault.UNIFY and unify.py --- semantic_router/llms/unify.py | 26 ++++++++++++++++---------- semantic_router/utils/defaults.py | 13 +++++++++---- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 14631d15..2b84c8fe 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -5,37 +5,43 @@ from semantic_router.llms import BaseLLM from semantic_router.schema import Message +from semantic_router.utils.defaults import EncoderDefault # from unify.utils import _validate_api_key from unify.exceptions import UnifyError from unify.clients import Unify, AsyncUnify + class UnifyLLM(BaseLLM): client: Optional[Unify] = None - + async_client: Optional[AsyncUnify] = None + def __init__( self, name: Optional[str] = None, unify_api_key: Optional[str] = None, ): - + if name is None: - name = EncoderDefault.UNIFy.value["end_point"] - + name = f"{EncoderDefault.UNIFY.value["language_model"]}@\ + {EncoderDefault.UNIFY.value["language_provider"]}" + super().__init__(name=name) - - self.client = Unify(name, api_key=unify_api_key) + + self.client = Unify(endpoint=name, api_key=unify_api_key) def __call__(self, messages: List[Message]) -> str: if self.client is None: raise UnifyError("Unify client is not initialized.") try: - output = self.client.generate(messages=[m.to_openai() for m in messages]) - + output = self.client.generate( + messages=[m.to_openai() for m in messages] + ) + if not output: - raise Exception("No output generated") + raise Exception("No output generated") return output except Exception as e: - raise UnifyError(f"Unify API call failed. Error: {e}") from e + raise UnifyError(f"Unify API call failed. Error: {e}") from e diff --git a/semantic_router/utils/defaults.py b/semantic_router/utils/defaults.py index 17ad950a..f40cf200 100644 --- a/semantic_router/utils/defaults.py +++ b/semantic_router/utils/defaults.py @@ -36,8 +36,13 @@ class EncoderDefault(Enum): "BEDROCK_EMBEDDING_MODEL", "amazon.titan-embed-image-v1" ), } - UNIFy = { - "end_point": os.environ.get("UNIFY_END_POINT", "llama-3-8b-chat@together-ai" - ) + UNIFY = { + "language_model": os.environ.get( + "UNIFY_CHAT_MODEL_NAME", + "llama-3-8b-chat", + ), + "language_provider": os.environ.get( + "UNIFY_CHAT_MODEL_PROVIDER", + "together-ai", + ), } - From 428bf73c78e82a8d165f3629109650aec180d635 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 13:51:13 +0200 Subject: [PATCH 08/46] adds test --- semantic_router/llms/unify.py | 6 ++---- tests/unit/llms/test_llm_unify.py | 32 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 tests/unit/llms/test_llm_unify.py diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 2b84c8fe..1f12f97e 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -1,7 +1,4 @@ -import os -from typing import Dict, List, Optional - -import unify +from typing import List, Optional from semantic_router.llms import BaseLLM from semantic_router.schema import Message @@ -45,3 +42,4 @@ def __call__(self, messages: List[Message]) -> str: except Exception as e: raise UnifyError(f"Unify API call failed. 
Error: {e}") from e + diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py new file mode 100644 index 00000000..0b71abb7 --- /dev/null +++ b/tests/unit/llms/test_llm_unify.py @@ -0,0 +1,32 @@ +import pytest + +from semantic_router.llms.unify import UnifyLLM +from semantic_router.schema import Message + + +@pytest.fixture +def unify_llm(): + return UnifyLLM() + + +class TestOllamaLLM: + def test_ollama_llm_init_success(self, ollama_llm): + assert unify_llm.name == "ollama" + assert unify_llm.temperature == 0.2 + assert unify_llm.llm_name == "openhermes" + assert unify_llm.max_tokens == 200 + assert unify_llm.stream is False + + def test_ollama_llm_call_success(self, unify_llm, mocker): + mock_response = mocker.MagicMock() + mock_response.json.return_value = {"message": {"content": "test response"}} + mocker.patch("requests.post", return_value=mock_response) + + output = unify_llm([Message(role="user", content="test")]) + assert output == "test response" + + def test_ollama_llm_error_handling(self, unify_llm, mocker): + mocker.patch("requests.post", side_effect=Exception("LLM error")) + with pytest.raises(Exception) as exc_info: + unify_llm([Message(role="user", content="test")]) + assert "LLM error" in str(exc_info.value) From ed6036972ea46796cca462720c409d081f5127da Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 14:14:39 +0200 Subject: [PATCH 09/46] updates unify.py and tests --- semantic_router/llms/unify.py | 23 +++++++++++++++++------ tests/unit/llms/test_llm_unify.py | 9 ++++----- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 1f12f97e..9f05fb35 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -11,13 +11,19 @@ class UnifyLLM(BaseLLM): - client: Optional[Unify] = None - async_client: Optional[AsyncUnify] = None + client: Optional[Unify] + async_client: Optional[AsyncUnify] + temperature: Optional[float] + max_tokens: Optional[int] + stream: Optional[bool] def __init__( self, - name: Optional[str] = None, - unify_api_key: Optional[str] = None, + name: Optional[str], + unify_api_key: Optional[str], + temperature: Optional[float] = 0.01, + max_tokens: Optional[int] = 200, + stream: bool = False, ): if name is None: @@ -25,7 +31,9 @@ def __init__( {EncoderDefault.UNIFY.value["language_provider"]}" super().__init__(name=name) - + self.temperature = temperature + self.max_tokens = max_tokens + self.stream = stream self.client = Unify(endpoint=name, api_key=unify_api_key) def __call__(self, messages: List[Message]) -> str: @@ -33,7 +41,10 @@ def __call__(self, messages: List[Message]) -> str: raise UnifyError("Unify client is not initialized.") try: output = self.client.generate( - messages=[m.to_openai() for m in messages] + messages=[m.to_openai() for m in messages], + max_tokens=self.max_tokens, + temperature=self.temperature, + stream=self.stream, ) if not output: diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 0b71abb7..e9e766d1 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -10,14 +10,13 @@ def unify_llm(): class TestOllamaLLM: - def test_ollama_llm_init_success(self, ollama_llm): - assert unify_llm.name == "ollama" - assert unify_llm.temperature == 0.2 - assert unify_llm.llm_name == "openhermes" + def test_unify_llm_init_success(self, unify_llm): + assert unify_llm.name == "gpt-4o@openai" + assert unify_llm.temperature == 0.01 assert 
unify_llm.max_tokens == 200 assert unify_llm.stream is False - def test_ollama_llm_call_success(self, unify_llm, mocker): + def test_unify_llm_call_success(self, unify_llm, mocker): mock_response = mocker.MagicMock() mock_response.json.return_value = {"message": {"content": "test response"}} mocker.patch("requests.post", return_value=mock_response) From 691edb86b32c60fd171956edd66521b7b6a800b4 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 14:16:17 +0200 Subject: [PATCH 10/46] updates unify.py and tests --- tests/unit/llms/test_llm_unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index e9e766d1..0711ad6f 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -9,7 +9,7 @@ def unify_llm(): return UnifyLLM() -class TestOllamaLLM: +class TestUnifyLLM: def test_unify_llm_init_success(self, unify_llm): assert unify_llm.name == "gpt-4o@openai" assert unify_llm.temperature == 0.01 From bf312a44574687d802db3778c34f303e8c89b5a4 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 14:17:35 +0200 Subject: [PATCH 11/46] updates unify.py and tests --- tests/unit/llms/test_llm_unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 0711ad6f..658fb864 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -1,6 +1,6 @@ import pytest -from semantic_router.llms.unify import UnifyLLM +from semantic_router.llms import UnifyLLM from semantic_router.schema import Message From 434203bc51e13fea69855358ae007108664453b2 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 14:18:58 +0200 Subject: [PATCH 12/46] updates unify.py and tests --- tests/unit/llms/test_llm_unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 658fb864..0711ad6f 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -1,6 +1,6 @@ import pytest -from semantic_router.llms import UnifyLLM +from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message From 636496f708fd7c628a8f057b0afe2c598be39b35 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 14:34:20 +0200 Subject: [PATCH 13/46] updates unify.py and tests --- semantic_router/llms/unify.py | 4 ++-- tests/unit/llms/test_llm_unify.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 9f05fb35..64cb6cff 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -19,8 +19,8 @@ class UnifyLLM(BaseLLM): def __init__( self, - name: Optional[str], - unify_api_key: Optional[str], + name: Optional[str] = None, + unify_api_key: Optional[str] = None, temperature: Optional[float] = 0.01, max_tokens: Optional[int] = 200, stream: bool = False, diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 0711ad6f..0a2eaf22 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -24,7 +24,7 @@ def test_unify_llm_call_success(self, unify_llm, mocker): output = unify_llm([Message(role="user", content="test")]) assert output == "test response" - def test_ollama_llm_error_handling(self, unify_llm, mocker): + def test_unify_llm_error_handling(self, unify_llm, 
mocker): mocker.patch("requests.post", side_effect=Exception("LLM error")) with pytest.raises(Exception) as exc_info: unify_llm([Message(role="user", content="test")]) From 0886aaf9aa0bb981fc4450730c82d69a37d95047 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 14:42:50 +0200 Subject: [PATCH 14/46] updates tests --- tests/unit/llms/test_llm_unify.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 0a2eaf22..70d272b9 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -1,8 +1,10 @@ import pytest +from dotenv import load_dotenv from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message +load_dotenv() @pytest.fixture def unify_llm(): From 533d0457a84bef2605ed95b372dbaafa2e3efb16 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 15:14:43 +0200 Subject: [PATCH 15/46] updates unify.py and tests --- tests/unit/llms/test_llm_unify.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 70d272b9..0a2eaf22 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -1,10 +1,8 @@ import pytest -from dotenv import load_dotenv from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message -load_dotenv() @pytest.fixture def unify_llm(): From 577e0fde0ade6fbe373bdee5036cf330ff7a7f6e Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Thu, 22 Aug 2024 16:28:36 +0200 Subject: [PATCH 16/46] updates tests --- poetry.lock | 6 +++--- pyproject.toml | 1 + tests/unit/llms/test_llm_unify.py | 2 ++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index a6d4c7d7..a03cdb25 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -3831,7 +3831,7 @@ six = ">=1.5" name = "python-dotenv" version = "1.0.1" description = "Read key-value pairs from a .env file and set them as environment variables" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, @@ -5382,4 +5382,4 @@ vision = ["pillow", "torch", "torchvision", "transformers"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "32e1158d0742323cf5e3a7262ceec1229829d80df399caa62b5b693bc430932a" +content-hash = "8b916cdcdc2f555b12572e63751c6d06087fb02dd089d53ba6128028f4fc8db9" diff --git a/pyproject.toml b/pyproject.toml index f69ceca0..6fe05c72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,7 @@ botocore = {version = "^1.34.110", optional = true} aiohttp = "^3.9.5" fastembed = {version = "^0.3.0", optional = true} psycopg2 = {version = "^2.9.9", optional = true} +python-dotenv = "^1.0.1" [tool.poetry.extras] hybrid = ["pinecone-text"] diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 0a2eaf22..dfa9e5be 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,7 +2,9 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message +from dotenv import load_dotenv +load_dotenv() @pytest.fixture def unify_llm(): From c858590d74155cd1d9fd09df59beb62b0c60fba7 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Thu, 22 Aug 2024 18:12:11 +0300 Subject: [PATCH 17/46] Update unify.py --- semantic_router/llms/unify.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 64cb6cff..d2a9c387 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -27,8 +27,8 @@ def __init__( ): if name is None: - name = f"{EncoderDefault.UNIFY.value["language_model"]}@\ - {EncoderDefault.UNIFY.value["language_provider"]}" + name = f"{EncoderDefault.UNIFY.value['language_model']}@\ + {EncoderDefault.UNIFY.value['language_provider']}" super().__init__(name=name) self.temperature = temperature From 1a379e3cd48f5bebdbc0d126b2176d1f37a78b37 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Thu, 22 Aug 2024 18:21:13 +0300 Subject: [PATCH 18/46] Update defaults.py --- semantic_router/utils/defaults.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/semantic_router/utils/defaults.py b/semantic_router/utils/defaults.py index f40cf200..af4ef031 100644 --- a/semantic_router/utils/defaults.py +++ b/semantic_router/utils/defaults.py @@ -38,11 +38,9 @@ class EncoderDefault(Enum): } UNIFY = { "language_model": os.environ.get( - "UNIFY_CHAT_MODEL_NAME", - "llama-3-8b-chat", - ), + "UNIFY_CHAT_MODEL_NAME", "llama-3-8b-chat" + ), "language_provider": os.environ.get( - "UNIFY_CHAT_MODEL_PROVIDER", - "together-ai", - ), + "UNIFY_CHAT_MODEL_PROVIDER", "together-ai" + ), } From 225f1037b527e0f5995cad9718a160afe5a11da8 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Thu, 22 Aug 2024 18:49:21 +0300 Subject: [PATCH 19/46] Update unify.py --- semantic_router/llms/unify.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 
d2a9c387..0481b240 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -27,8 +27,7 @@ def __init__( ): if name is None: - name = f"{EncoderDefault.UNIFY.value['language_model']}@\ - {EncoderDefault.UNIFY.value['language_provider']}" + name = f"{EncoderDefault.UNIFY.value['language_model']}@{EncoderDefault.UNIFY.value['language_provider']}" super().__init__(name=name) self.temperature = temperature @@ -48,7 +47,7 @@ def __call__(self, messages: List[Message]) -> str: ) if not output: - raise Exception("No output generated") + raise UnifyError("No output generated") return output except Exception as e: From 38f81a6d64981a73fed4889bc4d5dfd3d3d79edd Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Thu, 22 Aug 2024 18:50:29 +0300 Subject: [PATCH 20/46] Update unify.py --- semantic_router/llms/unify.py | 1 - 1 file changed, 1 deletion(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 0481b240..b572af03 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -4,7 +4,6 @@ from semantic_router.schema import Message from semantic_router.utils.defaults import EncoderDefault -# from unify.utils import _validate_api_key from unify.exceptions import UnifyError from unify.clients import Unify, AsyncUnify From 9b39b7911f32bcab7b06a8a6b3a8b828be93cea3 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Thu, 22 Aug 2024 21:02:26 +0300 Subject: [PATCH 21/46] Update unify.py --- semantic_router/llms/unify.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index b572af03..a3b51df6 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -1,3 +1,4 @@ +import asyncio from typing import List, Optional from semantic_router.llms import BaseLLM @@ -33,6 +34,7 @@ def __init__( self.max_tokens = max_tokens self.stream = stream self.client = Unify(endpoint=name, api_key=unify_api_key) + self.async_client = AsyncUnify(endpoint=name, api_key=unify_api_key) def __call__(self, messages: List[Message]) -> str: if self.client is None: @@ -52,3 +54,25 @@ def __call__(self, messages: List[Message]) -> str: except Exception as e: raise UnifyError(f"Unify API call failed. Error: {e}") from e + async def acall(self, messages: List[Message]) -> str: + if self.async_client is None: + raise UnifyError("Unify async_client is not initialized.") + try: + + async def main(): + responses = await self.async_client.generate( + messages=[m.to_openai() for m in messages], + max_tokens=self.max_tokens, + temperature=self.temperature, + stream=self.stream, + ) + return responses + + output = await main() + + if not output: + raise UnifyError("No output generated") + return output + + except Exception as e: + raise UnifyError(f"Unify API call failed. 
Error: {e}") from e From cafbe8a3f3763d5e00f6f919a99b22de2473c569 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:28:26 +0300 Subject: [PATCH 22/46] Update pyproject.toml --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6fe05c72..b38e3470 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,8 @@ python = ">=3.9,<3.13" pydantic = "^2.5.3" openai = ">=1.10.0,<2.0.0" cohere = ">=5.00,<6.00" -mistralai= {version = ">=0.0.12,<0.1.0", optional = true} +mistralai = {version = ">=0.0.12,<0.1.0", optional = true} +unify = ">0.9.2" numpy = "^1.25.2" colorlog = "^6.8.0" pyyaml = "^6.0.1" From 58d8e9150eb0356c96a534ef4caaf1d8834398a4 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:31:20 +0300 Subject: [PATCH 23/46] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b38e3470..1a071925 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ pydantic = "^2.5.3" openai = ">=1.10.0,<2.0.0" cohere = ">=5.00,<6.00" mistralai = {version = ">=0.0.12,<0.1.0", optional = true} -unify = ">0.9.2" +unify = ">0.9.0" numpy = "^1.25.2" colorlog = "^6.8.0" pyyaml = "^6.0.1" From c5a245abaaa870daa7dfd79e26d421dc428a229c Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:49:14 +0300 Subject: [PATCH 24/46] Update pyproject.toml --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 1a071925..786362c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ pydantic = "^2.5.3" openai = ">=1.10.0,<2.0.0" cohere = ">=5.00,<6.00" mistralai = {version = ">=0.0.12,<0.1.0", optional = true} -unify = ">0.9.0" +unifyai = "^0.9.1" numpy = "^1.25.2" colorlog = "^6.8.0" pyyaml = "^6.0.1" @@ -60,6 +60,7 @@ google = ["google-cloud-aiplatform"] bedrock = ["boto3", "botocore"] postgres = ["psycopg2"] fastembed = ["fastembed"] +unify = ["unifyai"] [tool.poetry.group.dev.dependencies] ipykernel = "^6.25.0" From 7407033de6ec8492affa142827fe6c2bcfe9686b Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Sun, 25 Aug 2024 07:10:24 +0300 Subject: [PATCH 25/46] Update unify.py --- semantic_router/llms/unify.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index a3b51df6..47e4f240 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -59,16 +59,13 @@ async def acall(self, messages: List[Message]) -> str: raise UnifyError("Unify async_client is not initialized.") try: - async def main(): - responses = await self.async_client.generate( - messages=[m.to_openai() for m in messages], - max_tokens=self.max_tokens, - temperature=self.temperature, - stream=self.stream, - ) - return responses - - output = await main() + output = await self.async_client.generate( + messages=[m.to_openai() for m in messages], + max_tokens=self.max_tokens, + temperature=self.temperature, + stream=self.stream, + ) + if not output: raise UnifyError("No output generated") From 0d49633a5368f34a73caef347368530c10f3209a Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 10:41:01 +0200 Subject: [PATCH 
26/46] adds mock to tests --- tests/unit/llms/test_llm_unify.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index dfa9e5be..56e2afd7 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,13 +2,16 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message +from unittest.mock import patch + from dotenv import load_dotenv load_dotenv() @pytest.fixture -def unify_llm(): - return UnifyLLM() +def unify_llm(mocker): + mocker.patch("unify.clients.Unify") + return UnifyLLM(unify_api_key="fake-api-key") class TestUnifyLLM: From 4e9a81156bd0b5b86a72dbd844bf625a07ca3a45 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 10:50:40 +0200 Subject: [PATCH 27/46] fixing pytest mock --- tests/unit/llms/test_llm_unify.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 56e2afd7..4ab2482d 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -15,8 +15,14 @@ def unify_llm(mocker): class TestUnifyLLM: + + def test_unify_llm_init_success(self, mocker): + mocker.patch("os.getenv", return_value="fake-api-key") + llm = UnifyLLM() + assert llm.client is not None + def test_unify_llm_init_success(self, unify_llm): - assert unify_llm.name == "gpt-4o@openai" + assert unify_llm.name == "llama-3-8b-chat@together-ai" assert unify_llm.temperature == 0.01 assert unify_llm.max_tokens == 200 assert unify_llm.stream is False @@ -24,7 +30,7 @@ def test_unify_llm_init_success(self, unify_llm): def test_unify_llm_call_success(self, unify_llm, mocker): mock_response = mocker.MagicMock() mock_response.json.return_value = {"message": {"content": "test response"}} - mocker.patch("requests.post", return_value=mock_response) + mocker.patch("unify.clients.Unify.generate", return_value=mock_response) output = unify_llm([Message(role="user", content="test")]) assert output == "test response" From 06369b5a3590554c4d5de920930a174956573475 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 10:55:38 +0200 Subject: [PATCH 28/46] fixing pytest mock --- tests/unit/llms/test_llm_unify.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 4ab2482d..977c6921 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -16,18 +16,20 @@ def unify_llm(mocker): class TestUnifyLLM: - def test_unify_llm_init_success(self, mocker): + def test_unify_llm_init_success_1(self, mocker): mocker.patch("os.getenv", return_value="fake-api-key") - llm = UnifyLLM() + llm = unify_llm assert llm.client is not None - def test_unify_llm_init_success(self, unify_llm): - assert unify_llm.name == "llama-3-8b-chat@together-ai" - assert unify_llm.temperature == 0.01 - assert unify_llm.max_tokens == 200 - assert unify_llm.stream is False + def test_unify_llm_init_success_2(self, mocker): + mocker.patch("os.getenv", return_value="fake-api-key") + llm = unify_llm + assert llm.name == "llama-3-8b-chat@together-ai" + assert llm.temperature == 0.01 + assert llm.max_tokens == 200 + assert llm.stream is False - def test_unify_llm_call_success(self, unify_llm, mocker): + def test_unify_llm_call_success(self, mocker): mock_response = mocker.MagicMock() mock_response.json.return_value = {"message": 
{"content": "test response"}} mocker.patch("unify.clients.Unify.generate", return_value=mock_response) @@ -35,7 +37,7 @@ def test_unify_llm_call_success(self, unify_llm, mocker): output = unify_llm([Message(role="user", content="test")]) assert output == "test response" - def test_unify_llm_error_handling(self, unify_llm, mocker): + def test_unify_llm_error_handling(self, mocker): mocker.patch("requests.post", side_effect=Exception("LLM error")) with pytest.raises(Exception) as exc_info: unify_llm([Message(role="user", content="test")]) From 7b624c71984e8f86bbb60ca2a50d15364589c8cb Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 10:59:56 +0200 Subject: [PATCH 29/46] fixing pytest mock --- tests/unit/llms/test_llm_unify.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 977c6921..ef210955 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -16,20 +16,18 @@ def unify_llm(mocker): class TestUnifyLLM: - def test_unify_llm_init_success_1(self, mocker): + def test_unify_llm_init_success_1(self, unify_llm, mocker): mocker.patch("os.getenv", return_value="fake-api-key") - llm = unify_llm - assert llm.client is not None + assert unify_llm.client is not None - def test_unify_llm_init_success_2(self, mocker): + def test_unify_llm_init_success_2(self, unify_llm, mocker): mocker.patch("os.getenv", return_value="fake-api-key") - llm = unify_llm - assert llm.name == "llama-3-8b-chat@together-ai" - assert llm.temperature == 0.01 - assert llm.max_tokens == 200 - assert llm.stream is False + assert unify_llm.name == "llama-3-8b-chat@together-ai" + assert unify_llm.temperature == 0.01 + assert unify_llm.max_tokens == 200 + assert unify_llm.stream is False - def test_unify_llm_call_success(self, mocker): + def test_unify_llm_call_success(self, unify_llm, mocker): mock_response = mocker.MagicMock() mock_response.json.return_value = {"message": {"content": "test response"}} mocker.patch("unify.clients.Unify.generate", return_value=mock_response) @@ -37,7 +35,7 @@ def test_unify_llm_call_success(self, mocker): output = unify_llm([Message(role="user", content="test")]) assert output == "test response" - def test_unify_llm_error_handling(self, mocker): + def test_unify_llm_error_handling(self, unify_llm, mocker): mocker.patch("requests.post", side_effect=Exception("LLM error")) with pytest.raises(Exception) as exc_info: unify_llm([Message(role="user", content="test")]) From bec1b475c89bde216aa2c6a484fda24c66defe11 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 11:15:28 +0200 Subject: [PATCH 30/46] fixing pytest mock --- tests/unit/llms/test_llm_unify.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index ef210955..f485ec25 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,6 +2,7 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message +import unify.clients from unittest.mock import patch from dotenv import load_dotenv @@ -11,6 +12,7 @@ @pytest.fixture def unify_llm(mocker): mocker.patch("unify.clients.Unify") + mocker.patch.object(unify.clients.Unify, "set_endpoint") return UnifyLLM(unify_api_key="fake-api-key") @@ -18,24 +20,28 @@ class TestUnifyLLM: def test_unify_llm_init_success_1(self, unify_llm, mocker): mocker.patch("os.getenv", 
return_value="fake-api-key") - assert unify_llm.client is not None + + assert unify_llm.client is not None def test_unify_llm_init_success_2(self, unify_llm, mocker): mocker.patch("os.getenv", return_value="fake-api-key") + assert unify_llm.name == "llama-3-8b-chat@together-ai" assert unify_llm.temperature == 0.01 assert unify_llm.max_tokens == 200 assert unify_llm.stream is False def test_unify_llm_call_success(self, unify_llm, mocker): + mock_response = mocker.MagicMock() mock_response.json.return_value = {"message": {"content": "test response"}} - mocker.patch("unify.clients.Unify.generate", return_value=mock_response) + mocker.patch.object(unify_llm.client, "generate", return_value=mock_response) output = unify_llm([Message(role="user", content="test")]) assert output == "test response" def test_unify_llm_error_handling(self, unify_llm, mocker): + mocker.patch("requests.post", side_effect=Exception("LLM error")) with pytest.raises(Exception) as exc_info: unify_llm([Message(role="user", content="test")]) From 861adc302af0d3bf498378fbbab4b313c2566bc9 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 12:18:18 +0200 Subject: [PATCH 31/46] rewrites __call__ as a generator for _call | _acall, updates mock tests --- semantic_router/llms/unify.py | 43 +++++++++++++++++++------------ tests/unit/llms/test_llm_unify.py | 7 +++-- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 47e4f240..064cc544 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -1,5 +1,5 @@ import asyncio -from typing import List, Optional +from typing import List, Optional, Coroutine, Callable, Any, Union from semantic_router.llms import BaseLLM from semantic_router.schema import Message @@ -24,6 +24,7 @@ def __init__( temperature: Optional[float] = 0.01, max_tokens: Optional[int] = 200, stream: bool = False, + Async: bool = False, ): if name is None: @@ -35,8 +36,16 @@ def __init__( self.stream = stream self.client = Unify(endpoint=name, api_key=unify_api_key) self.async_client = AsyncUnify(endpoint=name, api_key=unify_api_key) + self.Async = Async - def __call__(self, messages: List[Message]) -> str: + def __call__(self, messages: List[Message]) -> Any: + func: Union[Callable[..., str], + Callable[..., Coroutine[Any, Any, str]]] = ( + self._call if not self.Async else self._acall + ) + return func(messages) + + def _call(self, messages: List[Message]) -> str: if self.client is None: raise UnifyError("Unify client is not initialized.") try: @@ -54,22 +63,22 @@ def __call__(self, messages: List[Message]) -> str: except Exception as e: raise UnifyError(f"Unify API call failed. Error: {e}") from e - async def acall(self, messages: List[Message]) -> str: + async def _acall(self, messages: List[Message]) -> str: if self.async_client is None: raise UnifyError("Unify async_client is not initialized.") - try: - - output = await self.async_client.generate( - messages=[m.to_openai() for m in messages], - max_tokens=self.max_tokens, - temperature=self.temperature, - stream=self.stream, - ) - + try: + + output = await self.async_client.generate( + messages=[m.to_openai() for m in messages], + max_tokens=self.max_tokens, + temperature=self.temperature, + stream=self.stream, + ) - if not output: - raise UnifyError("No output generated") - return output + + if not output: + raise UnifyError("No output generated") + return output - except Exception as e: - raise UnifyError(f"Unify API call failed. 
Error: {e}") from e + except Exception as e: + raise UnifyError(f"Unify API call failed. Error: {e}") from e diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index f485ec25..db81486f 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,7 +2,7 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message -import unify.clients +from unify.clients import Unify, AsyncUnify from unittest.mock import patch from dotenv import load_dotenv @@ -12,7 +12,9 @@ @pytest.fixture def unify_llm(mocker): mocker.patch("unify.clients.Unify") - mocker.patch.object(unify.clients.Unify, "set_endpoint") + mocker.patch.object(Unify, "set_endpoint", return_value=None) + mocker.patch.object(AsyncUnify, "set_endpoint", return_value=None) + return UnifyLLM(unify_api_key="fake-api-key") @@ -20,6 +22,7 @@ class TestUnifyLLM: def test_unify_llm_init_success_1(self, unify_llm, mocker): mocker.patch("os.getenv", return_value="fake-api-key") + mocker.patch.object(unify_llm.client, "set_endpoint", return_value=None) assert unify_llm.client is not None From 5d95892078aba2fe9d5db008a3516d72e38d76ee Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Tue, 27 Aug 2024 12:19:29 +0200 Subject: [PATCH 32/46] rewrites __call__ as a generator for _call | _acall, updates mock tests --- semantic_router/llms/unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 064cc544..6a2ec829 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -36,7 +36,7 @@ def __init__( self.stream = stream self.client = Unify(endpoint=name, api_key=unify_api_key) self.async_client = AsyncUnify(endpoint=name, api_key=unify_api_key) - self.Async = Async + self.Async = Async # noqa: C0103 def __call__(self, messages: List[Message]) -> Any: func: Union[Callable[..., str], From fb2c83c14ccb3555a8de98b0f2498f5928de685c Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:05:41 +0300 Subject: [PATCH 33/46] Update poetry.lock --- poetry.lock | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index a03cdb25..c570dbaf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -5180,6 +5180,21 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +[[package]] +name = "unifyai" +version = "0.9.2" +description = "A Python package for interacting with the Unify API" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "unifyai-0.9.2-py3-none-any.whl", hash = "sha256:03c4547316712a1d6592011effb3bd5fff71b811cf698ad92c662150fe62e322"}, + {file = "unifyai-0.9.2.tar.gz", hash = "sha256:bdf5b8edc9d412e5aeb8d1328b5699c061f918471a2d3f474eb1f57b1a0a508e"}, +] + +[package.dependencies] +openai = ">=1.12.0,<2.0.0" +requests = ">=2.31.0,<3.0.0" + [[package]] name = "urllib3" version = "1.26.19" @@ -5377,9 +5392,10 @@ pinecone = ["pinecone-client"] postgres = ["psycopg2"] processing = ["matplotlib"] qdrant = ["qdrant-client"] +unify = ["unifyai"] vision = ["pillow", "torch", "torchvision", "transformers"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "8b916cdcdc2f555b12572e63751c6d06087fb02dd089d53ba6128028f4fc8db9" +content-hash = "583b9f9f4889eb8c0b65d975fd357e667e4ba95a32e4757c4a16886adef00559" From 
662faff30b2f3cdd6f7c605ace5be3ddbbe4dc5c Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Tue, 27 Aug 2024 19:32:07 +0300 Subject: [PATCH 34/46] Update test_llm_unify.py --- tests/unit/llms/test_llm_unify.py | 53 +++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index dfa9e5be..bc5bdd54 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,32 +2,51 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message -from dotenv import load_dotenv +# from dotenv import load_dotenv -load_dotenv() +# load_dotenv() @pytest.fixture -def unify_llm(): - return UnifyLLM() +def unify_llm(mocker): + mocker.patch("unify.clients.Unify") + mocker.patch("json.loads", return_value=["llama-3-8b-chat@together-ai"]) + return UnifyLLM(unify_api_key="test_api_key") class TestUnifyLLM: - def test_unify_llm_init_success(self, unify_llm): - assert unify_llm.name == "gpt-4o@openai" + def test_unify_llm_init_parameter_success(self, unify_llm): + assert unify_llm.name == "llama-3-8b-chat@together-ai" assert unify_llm.temperature == 0.01 assert unify_llm.max_tokens == 200 assert unify_llm.stream is False + + def test_unify_llm_init_with_api_key(self, unify_llm): + assert unify_llm.client is not None, "Client should be initialized" + assert unify_llm.name == "llama-3-8b-chat@together-ai", "Default name not set correctly" - def test_unify_llm_call_success(self, unify_llm, mocker): - mock_response = mocker.MagicMock() - mock_response.json.return_value = {"message": {"content": "test response"}} - mocker.patch("requests.post", return_value=mock_response) - output = unify_llm([Message(role="user", content="test")]) - assert output == "test response" +# @pytest.fixture +# def unify_llm(): +# return UnifyLLM() - def test_unify_llm_error_handling(self, unify_llm, mocker): - mocker.patch("requests.post", side_effect=Exception("LLM error")) - with pytest.raises(Exception) as exc_info: - unify_llm([Message(role="user", content="test")]) - assert "LLM error" in str(exc_info.value) + +# class TestUnifyLLM: +# def test_unify_llm_init_success(self, unify_llm): +# assert unify_llm.name == "gpt-4o@openai" +# assert unify_llm.temperature == 0.01 +# assert unify_llm.max_tokens == 200 +# assert unify_llm.stream is False + +# def test_unify_llm_call_success(self, unify_llm, mocker): +# mock_response = mocker.MagicMock() +# mock_response.json.return_value = {"message": {"content": "test response"}} +# mocker.patch("requests.post", return_value=mock_response) + +# output = unify_llm([Message(role="user", content="test")]) +# assert output == "test response" + +# def test_unify_llm_error_handling(self, unify_llm, mocker): +# mocker.patch("requests.post", side_effect=Exception("LLM error")) +# with pytest.raises(Exception) as exc_info: +# unify_llm([Message(role="user", content="test")]) +# assert "LLM error" in str(exc_info.value) From dfc352de6cc7f5d83141ccd6aba1cf65e0aacfe0 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Tue, 27 Aug 2024 19:46:55 +0300 Subject: [PATCH 35/46] Update test_llm_unify.py --- tests/unit/llms/test_llm_unify.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index bc5bdd54..014359bf 100644 --- a/tests/unit/llms/test_llm_unify.py +++ 
b/tests/unit/llms/test_llm_unify.py @@ -24,6 +24,11 @@ def test_unify_llm_init_with_api_key(self, unify_llm): assert unify_llm.client is not None, "Client should be initialized" assert unify_llm.name == "llama-3-8b-chat@together-ai", "Default name not set correctly" + def test_unify_llm_init_without_api_key(self, mocker): + mocker.patch("os.environ.get", return_value=None) + with pytest.raises(KeyError) as _: + UnifyLLM() + # @pytest.fixture # def unify_llm(): From 3254d731d20c14520085416e377e2d480ef2c13c Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Tue, 27 Aug 2024 20:00:39 +0300 Subject: [PATCH 36/46] Update test_llm_unify.py --- tests/unit/llms/test_llm_unify.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 014359bf..7088132f 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,6 +2,7 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message +from unify.exceptions import UnifyError # from dotenv import load_dotenv # load_dotenv() @@ -29,6 +30,13 @@ def test_unify_llm_init_without_api_key(self, mocker): with pytest.raises(KeyError) as _: UnifyLLM() + def test_unify_llm_call_uninitialized_client(self, unify_llm): + unify_llm.client = None + with pytest.raises(UnifyError) as e: + llm_input = [Message(role="user", content="test")] + unify_llm(llm_input) + assert "Unify client is not initialized." in str(e.value) + # @pytest.fixture # def unify_llm(): From 4cb4d1782ac59dcfa51b7a83ffe7b221a148a4c1 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Wed, 28 Aug 2024 12:46:38 +0200 Subject: [PATCH 37/46] update --- semantic_router/llms/unify.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 6a2ec829..1e070d54 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -74,8 +74,8 @@ async def _acall(self, messages: List[Message]) -> str: temperature=self.temperature, stream=self.stream, ) - - + + if not output: raise UnifyError("No output generated") return output From eb3ca0839a82869ec79ecdb843cfb8cf3fe371b0 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Wed, 28 Aug 2024 12:59:50 +0200 Subject: [PATCH 38/46] cleans errors after merge --- tests/unit/llms/test_llm_unify.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index c98d05fc..d870bce8 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -14,11 +14,10 @@ def unify_llm(mocker): mocker.patch("unify.clients.Unify") mocker.patch("json.loads", return_value=["llama-3-8b-chat@together-ai"]) - mocker.patch("unify.clients.Unify") mocker.patch.object(Unify, "set_endpoint", return_value=None) mocker.patch.object(AsyncUnify, "set_endpoint", return_value=None) - return UnifyLLM(unify_api_key="test_api_key"unify_api_key="fake-api-key") + return UnifyLLM(unify_api_key="fake-api-key") class TestUnifyLLM: From 20b39683085d9c89f1d20e90f1ee210505921e83 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Wed, 28 Aug 2024 13:23:09 +0200 Subject: [PATCH 39/46] pytest mock passing 6/6 --- semantic_router/llms/unify.py | 1 + tests/unit/llms/test_llm_unify.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 
1e070d54..f01b881c 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -16,6 +16,7 @@ class UnifyLLM(BaseLLM): temperature: Optional[float] max_tokens: Optional[int] stream: Optional[bool] + Async: Optional[bool] def __init__( self, diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index d870bce8..0362b254 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -76,7 +76,7 @@ def test_unify_llm_call_uninitialized_client(self, unify_llm): def test_unify_llm_error_handling(self, unify_llm, mocker): - mocker.patch("requests.post", side_effect=Exception("LLM error")) - with pytest.raises(Exception) as exc_info: + mocker.patch.object(unify_llm.client, "generate", side_effect=Exception("LLM error")) + with pytest.raises(UnifyError) as exc_info: unify_llm([Message(role="user", content="test")]) - assert "LLM error" in str(exc_info.value) + assert "LLM error" in f"{str(exc_info)}, {str(exc_info.value)}" From 7f1f3bf79af2f544fa3a929c5c04be2faba79544 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Wed, 28 Aug 2024 13:33:21 +0200 Subject: [PATCH 40/46] pytest mock passing 7/7 --- tests/unit/llms/test_llm_unify.py | 40 ++++++++++++++----------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 0362b254..a1de4b3a 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -52,31 +52,27 @@ def test_unify_llm_call_uninitialized_client(self, unify_llm): unify_llm(llm_input) assert "Unify client is not initialized." in str(e.value) - -# @pytest.fixture -# def unify_llm(): -# return UnifyLLM() - - -# class TestUnifyLLM: -# def test_unify_llm_init_success(self, unify_llm): -# assert unify_llm.name == "gpt-4o@openai" -# assert unify_llm.temperature == 0.01 -# assert unify_llm.max_tokens == 200 -# assert unify_llm.stream is False - -# def test_unify_llm_call_success(self, unify_llm, mocker): - -# mock_response = mocker.MagicMock() -# mock_response.json.return_value = {"message": {"content": "test response"}} -# mocker.patch.object(unify_llm.client, "generate", return_value=mock_response) - -# output = unify_llm([Message(role="user", content="test")]) -# assert output == "test response" - def test_unify_llm_error_handling(self, unify_llm, mocker): mocker.patch.object(unify_llm.client, "generate", side_effect=Exception("LLM error")) with pytest.raises(UnifyError) as exc_info: unify_llm([Message(role="user", content="test")]) assert "LLM error" in f"{str(exc_info)}, {str(exc_info.value)}" + + + def test_unify_llm_call_success(self, unify_llm, mocker): + + # mock_response = mocker.MagicMock() + # mock_response.choices[0].message.content = {"message": {"content": "test response"}} + mock_response = "test response" # unify currently outputs strings in generate, not completions + mocker.patch.object(unify_llm.client, "generate", return_value=mock_response) + + output = unify_llm([Message(role="user", content="test")]) + assert output == "test response" + + + # def test_unify_llm_init_success(self, unify_llm): + # assert unify_llm.name == "gpt-4o@openai" + # assert unify_llm.temperature == 0.01 + # assert unify_llm.max_tokens == 200 + # assert unify_llm.stream is False \ No newline at end of file From bb8f7d3c697c7c095f4069dec175d8db0ba07bc6 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 28 Aug 2024 16:41:19 +0300 Subject: [PATCH 41/46] 
Update test_llm_unify.py --- tests/unit/llms/test_llm_unify.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index a1de4b3a..3f589693 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -70,9 +70,3 @@ def test_unify_llm_call_success(self, unify_llm, mocker): output = unify_llm([Message(role="user", content="test")]) assert output == "test response" - - # def test_unify_llm_init_success(self, unify_llm): - # assert unify_llm.name == "gpt-4o@openai" - # assert unify_llm.temperature == 0.01 - # assert unify_llm.max_tokens == 200 - # assert unify_llm.stream is False \ No newline at end of file From c53db87d2592745c470dd8a24346344c3d1513c7 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 28 Aug 2024 17:53:40 +0300 Subject: [PATCH 42/46] Update test_llm_unify.py delete some unnecessary parts of the code. --- tests/unit/llms/test_llm_unify.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 3f589693..c7f56751 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -2,18 +2,14 @@ from semantic_router.llms.unify import UnifyLLM from semantic_router.schema import Message -from unify.clients import Unify, AsyncUnify -from unittest.mock import patch +from unify.clients import Unify, AsyncUnify from unify.exceptions import UnifyError -# from dotenv import load_dotenv - -# load_dotenv() @pytest.fixture def unify_llm(mocker): mocker.patch("unify.clients.Unify") - mocker.patch("json.loads", return_value=["llama-3-8b-chat@together-ai"]) + # mocker.patch("json.loads", return_value=["llama-3-8b-chat@together-ai"]) mocker.patch.object(Unify, "set_endpoint", return_value=None) mocker.patch.object(AsyncUnify, "set_endpoint", return_value=None) @@ -22,15 +18,14 @@ def unify_llm(mocker): class TestUnifyLLM: - def test_unify_llm_init_success_1(self, unify_llm, mocker): - mocker.patch("os.getenv", return_value="fake-api-key") - mocker.patch.object(unify_llm.client, "set_endpoint", return_value=None) + # def test_unify_llm_init_success_1(self, unify_llm, mocker): + # mocker.patch("os.getenv", return_value="fake-api-key") + # mocker.patch.object(unify_llm.client, "set_endpoint", return_value=None) - assert unify_llm.client is not None + # assert unify_llm.client is not None def test_unify_llm_init_success_2(self, unify_llm, mocker): - mocker.patch("os.getenv", return_value="fake-api-key") - + # mocker.patch("os.getenv", return_value="fake-api-key") assert unify_llm.name == "llama-3-8b-chat@together-ai" assert unify_llm.temperature == 0.01 assert unify_llm.max_tokens == 200 @@ -61,10 +56,8 @@ def test_unify_llm_error_handling(self, unify_llm, mocker): def test_unify_llm_call_success(self, unify_llm, mocker): - - # mock_response = mocker.MagicMock() - # mock_response.choices[0].message.content = {"message": {"content": "test response"}} - mock_response = "test response" # unify currently outputs strings in generate, not completions + + mock_response = "test response" mocker.patch.object(unify_llm.client, "generate", return_value=mock_response) output = unify_llm([Message(role="user", content="test")]) From 3ccffcb218c15de7d4c78d9ec1751eb25e8ff91a Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 28 Aug 2024 17:55:04 +0300 Subject: [PATCH 
43/46] Update test_llm_unify.py --- tests/unit/llms/test_llm_unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index c7f56751..bfa29f6b 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -24,7 +24,7 @@ class TestUnifyLLM: # assert unify_llm.client is not None - def test_unify_llm_init_success_2(self, unify_llm, mocker): + def test_unify_llm_init_success_2(self, unify_llm): # mocker.patch("os.getenv", return_value="fake-api-key") assert unify_llm.name == "llama-3-8b-chat@together-ai" assert unify_llm.temperature == 0.01 From a874be1e6ff45323acb18319691339996471a6c7 Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Wed, 28 Aug 2024 17:56:54 +0300 Subject: [PATCH 44/46] Update test_llm_unify.py --- tests/unit/llms/test_llm_unify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index bfa29f6b..1ac9cc1d 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -24,7 +24,7 @@ class TestUnifyLLM: # assert unify_llm.client is not None - def test_unify_llm_init_success_2(self, unify_llm): + def test_unify_llm_init_success(self, unify_llm): # mocker.patch("os.getenv", return_value="fake-api-key") assert unify_llm.name == "llama-3-8b-chat@together-ai" assert unify_llm.temperature == 0.01 From 2cba0c273136999dc8d586b433c2d18c0efa8cc3 Mon Sep 17 00:00:00 2001 From: Kacper-W-Kozdon Date: Wed, 28 Aug 2024 17:57:24 +0200 Subject: [PATCH 45/46] applies pre-commit --- semantic_router/llms/unify.py | 20 ++++++++------------ tests/unit/llms/test_llm_unify.py | 16 ++++++++-------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index f01b881c..770dd189 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -1,4 +1,4 @@ -import asyncio +import asyncio # noqa: F401 from typing import List, Optional, Coroutine, Callable, Any, Union from semantic_router.llms import BaseLLM @@ -10,7 +10,6 @@ class UnifyLLM(BaseLLM): - client: Optional[Unify] async_client: Optional[AsyncUnify] temperature: Optional[float] @@ -27,9 +26,9 @@ def __init__( stream: bool = False, Async: bool = False, ): - if name is None: - name = f"{EncoderDefault.UNIFY.value['language_model']}@{EncoderDefault.UNIFY.value['language_provider']}" + name = f"{EncoderDefault.UNIFY.value['language_model']}\ + @{EncoderDefault.UNIFY.value['language_provider']}" super().__init__(name=name) self.temperature = temperature @@ -40,10 +39,9 @@ def __init__( self.Async = Async # noqa: C0103 def __call__(self, messages: List[Message]) -> Any: - func: Union[Callable[..., str], - Callable[..., Coroutine[Any, Any, str]]] = ( - self._call if not self.Async else self._acall - ) + func: Union[Callable[..., str], Callable[..., Coroutine[Any, Any, str]]] = ( + self._call if not self.Async else self._acall + ) return func(messages) def _call(self, messages: List[Message]) -> str: @@ -55,7 +53,7 @@ def _call(self, messages: List[Message]) -> str: max_tokens=self.max_tokens, temperature=self.temperature, stream=self.stream, - ) + ) if not output: raise UnifyError("No output generated") @@ -68,14 +66,12 @@ async def _acall(self, messages: List[Message]) -> str: if self.async_client is None: raise UnifyError("Unify async_client is not initialized.") try: - output = await 
self.async_client.generate( messages=[m.to_openai() for m in messages], max_tokens=self.max_tokens, temperature=self.temperature, stream=self.stream, - ) - + ) if not output: raise UnifyError("No output generated") diff --git a/tests/unit/llms/test_llm_unify.py b/tests/unit/llms/test_llm_unify.py index 1ac9cc1d..5e00a81b 100644 --- a/tests/unit/llms/test_llm_unify.py +++ b/tests/unit/llms/test_llm_unify.py @@ -6,6 +6,7 @@ from unify.clients import Unify, AsyncUnify from unify.exceptions import UnifyError + @pytest.fixture def unify_llm(mocker): mocker.patch("unify.clients.Unify") @@ -17,7 +18,6 @@ def unify_llm(mocker): class TestUnifyLLM: - # def test_unify_llm_init_success_1(self, unify_llm, mocker): # mocker.patch("os.getenv", return_value="fake-api-key") # mocker.patch.object(unify_llm.client, "set_endpoint", return_value=None) @@ -30,10 +30,12 @@ def test_unify_llm_init_success(self, unify_llm): assert unify_llm.temperature == 0.01 assert unify_llm.max_tokens == 200 assert unify_llm.stream is False - + def test_unify_llm_init_with_api_key(self, unify_llm): assert unify_llm.client is not None, "Client should be initialized" - assert unify_llm.name == "llama-3-8b-chat@together-ai", "Default name not set correctly" + assert ( + unify_llm.name == "llama-3-8b-chat@together-ai" + ), "Default name not set correctly" def test_unify_llm_init_without_api_key(self, mocker): mocker.patch("os.environ.get", return_value=None) @@ -48,18 +50,16 @@ def test_unify_llm_call_uninitialized_client(self, unify_llm): assert "Unify client is not initialized." in str(e.value) def test_unify_llm_error_handling(self, unify_llm, mocker): - - mocker.patch.object(unify_llm.client, "generate", side_effect=Exception("LLM error")) + mocker.patch.object( + unify_llm.client, "generate", side_effect=Exception("LLM error") + ) with pytest.raises(UnifyError) as exc_info: unify_llm([Message(role="user", content="test")]) assert "LLM error" in f"{str(exc_info)}, {str(exc_info.value)}" - def test_unify_llm_call_success(self, unify_llm, mocker): - mock_response = "test response" mocker.patch.object(unify_llm.client, "generate", return_value=mock_response) output = unify_llm([Message(role="user", content="test")]) assert output == "test response" - From af64e75132d977f79e2df2e78e5db8a37dc01c9b Mon Sep 17 00:00:00 2001 From: Yara97Mansour <165589291+Yara97Mansour@users.noreply.github.com> Date: Thu, 29 Aug 2024 14:54:04 +0300 Subject: [PATCH 46/46] Update unify.py --- semantic_router/llms/unify.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/semantic_router/llms/unify.py b/semantic_router/llms/unify.py index 770dd189..a0e78a8b 100644 --- a/semantic_router/llms/unify.py +++ b/semantic_router/llms/unify.py @@ -27,8 +27,8 @@ def __init__( Async: bool = False, ): if name is None: - name = f"{EncoderDefault.UNIFY.value['language_model']}\ - @{EncoderDefault.UNIFY.value['language_provider']}" + name = (f"{EncoderDefault.UNIFY.value['language_model']}"+ + f"@{EncoderDefault.UNIFY.value['language_provider']}") super().__init__(name=name) self.temperature = temperature
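
A note for reviewers on this last hunk: PATCH 46/46 is a real fix, not a cosmetic one. A backslash line continuation inside an f-string literal keeps the continuation line's leading whitespace in the resulting string, so the default endpoint name built by PATCH 45 would have carried a run of spaces before the `@` separator and never matched a real `model@provider` endpoint. A standalone demonstration (illustrative, not part of the series):

# Shows the bug PATCH 46/46 fixes: a backslash continuation inside a string
# literal preserves the next line's indentation in the string value.
model, provider = "llama-3-8b-chat", "together-ai"

broken = f"{model}\
    @{provider}"  # -> 'llama-3-8b-chat    @together-ai'

fixed = f"{model}" + f"@{provider}"  # -> 'llama-3-8b-chat@together-ai'

assert "    @" in broken
assert fixed == "llama-3-8b-chat@together-ai"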
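
With the full series applied, `UnifyLLM.__call__` dispatches on the `Async` flag: `Async=False` runs `_call` and returns a string, while `Async=True` hands back the coroutine built by `_acall` for the caller to await. A minimal usage sketch, assuming a valid `UNIFY_API_KEY` is exported, that the constructor wires up both the `Unify` and `AsyncUnify` clients behind the declared attributes (the wiring itself is outside these hunks), and that the caller drives the async branch with `asyncio.run`:

# Illustrative usage only -- the endpoint string and the asyncio wiring are
# assumptions, not part of the patch series.
import asyncio

from semantic_router.llms.unify import UnifyLLM
from semantic_router.schema import Message

messages = [Message(role="user", content="What is semantic routing?")]

# Synchronous branch: Async defaults to False, so __call__ runs _call.
llm = UnifyLLM(name="llama-3-8b-chat@together-ai")
print(llm(messages))

# Asynchronous branch: __call__ returns the coroutine built by _acall,
# which the caller is responsible for awaiting (or running).
async_llm = UnifyLLM(name="llama-3-8b-chat@together-ai", Async=True)
print(asyncio.run(async_llm(messages)))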
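
The tests in this series only exercise the synchronous branch. A follow-up test for `_acall` might look like the sketch below; reusing the mocked `unify_llm` fixture, flipping it onto the async branch, and stubbing `async_client` with `mocker.AsyncMock` are assumptions about one way to drive the coroutine under pytest-mock, not something the series contains:

# Hypothetical async-path test, not part of the series. Assumes the mocked
# `unify_llm` fixture from test_llm_unify.py and pytest-mock >= 3.2.
import asyncio

from semantic_router.schema import Message


def test_unify_llm_acall_success(unify_llm, mocker):
    unify_llm.Async = True  # route __call__ through _acall
    mocker.patch.object(
        unify_llm,
        "async_client",
        mocker.MagicMock(generate=mocker.AsyncMock(return_value="test response")),
    )

    output = asyncio.run(unify_llm([Message(role="user", content="test")]))
    assert output == "test response"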