From 95874184983d0ca8a44aef7cc469ca62e574ef2d Mon Sep 17 00:00:00 2001 From: Priyadarshi Roy Date: Sat, 23 Mar 2024 12:40:09 +0530 Subject: [PATCH] Refactored providers into their own folder. Also added support for Google Gemini models --- config.json | 2 +- docs/docs/gpt-researcher/config.md | 4 +- gpt_researcher/config/config.py | 2 +- gpt_researcher/llm_provider/__init__.py | 7 + .../llm_provider/google/__init__.py | 0 gpt_researcher/llm_provider/google/google.py | 103 ++++++ .../llm_provider/openai/__init__.py | 0 gpt_researcher/llm_provider/openai/openai.py | 72 +++++ gpt_researcher/utils/llm.py | 91 ++---- poetry.lock | 298 +++++++++++++++++- pyproject.toml | 1 + requirements.txt | 3 +- 12 files changed, 519 insertions(+), 64 deletions(-) create mode 100644 gpt_researcher/llm_provider/__init__.py create mode 100644 gpt_researcher/llm_provider/google/__init__.py create mode 100644 gpt_researcher/llm_provider/google/google.py create mode 100644 gpt_researcher/llm_provider/openai/__init__.py create mode 100644 gpt_researcher/llm_provider/openai/openai.py diff --git a/config.json b/config.json index e7b82ef9a..aef4b1629 100644 --- a/config.json +++ b/config.json @@ -5,7 +5,7 @@ "selenium_web_browser": "chrome", "search_api": "tavily", "embedding_provider": "openai", - "llm_provider": "ChatOpenAI", + "llm_provider": "openai", "fast_llm_model": "gpt-3.5-turbo-16k", "smart_llm_model": "gpt-4", "fast_token_limit": 2000, diff --git a/docs/docs/gpt-researcher/config.md b/docs/docs/gpt-researcher/config.md index 90f2b83b8..3eea7cab7 100644 --- a/docs/docs/gpt-researcher/config.md +++ b/docs/docs/gpt-researcher/config.md @@ -22,7 +22,7 @@ Here is an example of the default config.py file found in `/gpt_researcher/confi def __init__(self, config_file: str = None): self.config_file = config_file self.retriever = "tavily" - self.llm_provider = "ChatOpenAI" + self.llm_provider = "openai" self.fast_llm_model = "gpt-3.5-turbo-16k" self.smart_llm_model = "gpt-4-1106-preview" self.fast_token_limit = 2000 @@ -42,7 +42,7 @@ def __init__(self, config_file: str = None): Please note that you can also include your own external JSON file by adding the path in the `config_file` param. -To learn more about additional LLM support you can check out the [Langchain Adapter](https://python.langchain.com/docs/guides/adapters/openai) and [Langchain supported LLMs](https://python.langchain.com/docs/integrations/llms/) documentation. Simply pass different model names in the `llm_provider` config param. +To learn more about additional LLM support you can check out the [Langchain Adapter](https://python.langchain.com/docs/guides/adapters/openai) and [Langchain supported LLMs](https://python.langchain.com/docs/integrations/llms/) documentation. Simply pass different provider names in the `llm_provider` config param. You can also change the search engine by modifying the `retriever` param to others such as `duckduckgo`, `googleAPI`, `googleSerp`, `searx` and more.
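A minimal sketch of how the renamed `llm_provider` values above are selected at runtime. The environment-variable names (`LLM_PROVIDER`, `GEMINI_API_KEY`) come from the `config.py` and `google.py` hunks in this patch; the standalone script, the `Config` class import path, and the placeholder key value are illustrative assumptions rather than part of the change.

```python
import os

# Select the new Google Gemini provider. Before this patch the default for
# LLM_PROVIDER was the class name "ChatOpenAI"; it is now a provider name.
os.environ["LLM_PROVIDER"] = "google"
os.environ["GEMINI_API_KEY"] = "your-gemini-api-key"  # read by GoogleProvider.get_api_key()

# Assumed import path for the Config class defined in gpt_researcher/config/config.py.
from gpt_researcher.config.config import Config

cfg = Config()
print(cfg.llm_provider)  # -> "google"
```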
diff --git a/gpt_researcher/config/config.py b/gpt_researcher/config/config.py index 3878b18ee..8aa43c640 100644 --- a/gpt_researcher/config/config.py +++ b/gpt_researcher/config/config.py @@ -11,7 +11,7 @@ def __init__(self, config_file: str = None): self.config_file = config_file if config_file else os.getenv('CONFIG_FILE') self.retriever = os.getenv('SEARCH_RETRIEVER', "tavily") self.embedding_provider = os.getenv('EMBEDDING_PROVIDER', 'openai') - self.llm_provider = os.getenv('LLM_PROVIDER', "ChatOpenAI") + self.llm_provider = os.getenv('LLM_PROVIDER', "openai") self.fast_llm_model = os.getenv('FAST_LLM_MODEL', "gpt-3.5-turbo-16k") self.smart_llm_model = os.getenv('SMART_LLM_MODEL', "gpt-4-1106-preview") self.fast_token_limit = int(os.getenv('FAST_TOKEN_LIMIT', 2000)) diff --git a/gpt_researcher/llm_provider/__init__.py b/gpt_researcher/llm_provider/__init__.py new file mode 100644 index 000000000..dbe596814 --- /dev/null +++ b/gpt_researcher/llm_provider/__init__.py @@ -0,0 +1,7 @@ +from .google.google import GoogleProvider +from .openai.openai import OpenAIProvider + +__all__ = [ + "GoogleProvider", + "OpenAIProvider" +] diff --git a/gpt_researcher/llm_provider/google/__init__.py b/gpt_researcher/llm_provider/google/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/gpt_researcher/llm_provider/google/google.py b/gpt_researcher/llm_provider/google/google.py new file mode 100644 index 000000000..c0ef8fce1 --- /dev/null +++ b/gpt_researcher/llm_provider/google/google.py @@ -0,0 +1,103 @@ +import os + +from colorama import Fore, Style +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_google_genai import ChatGoogleGenerativeAI + + +class GoogleProvider: + + def __init__( + self, + model, + temperature, + max_tokens + ): + # May be extended to support more google models in the future + self.model = "gemini-pro" + self.temperature = temperature + self.max_tokens = max_tokens + self.api_key = self.get_api_key() + self.llm = self.get_llm_model() + + def get_api_key(self): + """ + Gets the GEMINI_API_KEY + Returns: + + """ + try: + api_key = os.environ["GEMINI_API_KEY"] + except: + raise Exception( + "GEMINI API key not found. Please set the GEMINI_API_KEY environment variable.") + return api_key + + def get_llm_model(self): + # Initializing the chat model + llm = ChatGoogleGenerativeAI( + convert_system_message_to_human=True, + model=self.model, + temperature=self.temperature, + max_output_tokens=self.max_tokens, + google_api_key=self.api_key + ) + + return llm + + def convert_messages(self, messages): + """ + Converts chat messages into LangChain message objects based on their role. + + Args: + messages: A list of dicts, each with a "role" and a "content" key. + + Returns: + A list of SystemMessage objects for "system" entries and HumanMessage + objects for "user" entries, in the original order.
+ """ + converted_messages = [] + for message in messages: + if message["role"] == "system": + converted_messages.append( + SystemMessage(content=message["content"])) + elif message["role"] == "user": + converted_messages.append( + HumanMessage(content=message["content"])) + + return converted_messages + + async def get_chat_response(self, messages, stream, websocket=None): + if not stream: + # Getting output from the model chain using ainvoke for asynchronous invoking + converted_messages = self.convert_messages(messages) + output = await self.llm.ainvoke(converted_messages) + + return output.content + + else: + return await self.stream_response(messages, websocket) + + async def stream_response(self, messages, websocket=None): + paragraph = "" + response = "" + + # Streaming the response using the chain astream method from langchain + async for chunk in self.llm.astream(messages): + content = chunk.content + if content is not None: + response += content + paragraph += content + if "\n" in paragraph: + if websocket is not None: + await websocket.send_json({"type": "report", "output": paragraph}) + else: + print(f"{Fore.GREEN}{paragraph}{Style.RESET_ALL}") + paragraph = "" + + return response diff --git a/gpt_researcher/llm_provider/openai/__init__.py b/gpt_researcher/llm_provider/openai/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/gpt_researcher/llm_provider/openai/openai.py b/gpt_researcher/llm_provider/openai/openai.py new file mode 100644 index 000000000..a45ae261e --- /dev/null +++ b/gpt_researcher/llm_provider/openai/openai.py @@ -0,0 +1,72 @@ +import os + +from colorama import Fore, Style +from langchain_openai import ChatOpenAI + + +class OpenAIProvider: + + def __init__( + self, + model, + temperature, + max_tokens + ): + self.model = model + self.temperature = temperature + self.max_tokens = max_tokens + self.api_key = self.get_api_key() + self.llm = self.get_llm_model() + + def get_api_key(self): + """ + Gets the OpenAI API key + Returns: + + """ + try: + api_key = os.environ["OPENAI_API_KEY"] + except: + raise Exception( + "OpenAI API key not found. 
Please set the OPENAI_API_KEY environment variable.") + return api_key + + def get_llm_model(self): + # Initializing the chat model + llm = ChatOpenAI( + model=self.model, + temperature=self.temperature, + max_tokens=self.max_tokens, + api_key=self.api_key + ) + + return llm + + async def get_chat_response(self, messages, stream, websocket=None): + if not stream: + # Getting output from the model chain using ainvoke for asynchronous invoking + output = await self.llm.ainvoke(messages) + + return output.content + + else: + return await self.stream_response(messages, websocket) + + async def stream_response(self, messages, websocket=None): + paragraph = "" + response = "" + + # Streaming the response using the chain astream method from langchain + async for chunk in self.llm.astream(messages): + content = chunk.content + if content is not None: + response += content + paragraph += content + if "\n" in paragraph: + if websocket is not None: + await websocket.send_json({"type": "report", "output": paragraph}) + else: + print(f"{Fore.GREEN}{paragraph}{Style.RESET_ALL}") + paragraph = "" + + return response diff --git a/gpt_researcher/utils/llm.py b/gpt_researcher/utils/llm.py index 9dd1c3a8c..d1aa195f8 100644 --- a/gpt_researcher/utils/llm.py +++ b/gpt_researcher/utils/llm.py @@ -1,13 +1,31 @@ # libraries from __future__ import annotations +import logging + import json -from fastapi import WebSocket -from colorama import Fore, Style from typing import Optional -from langchain_openai import ChatOpenAI + +from colorama import Fore, Style +from fastapi import WebSocket + from gpt_researcher.master.prompts import auto_agent_instructions +def get_provider(llm_provider): + match llm_provider: + case "openai": + from ..llm_provider import OpenAIProvider + llm_provider = OpenAIProvider + case "google": + from ..llm_provider import GoogleProvider + llm_provider = GoogleProvider + + case _: + raise Exception("LLM provider not found.") + + return llm_provider + + async def create_chat_completion( messages: list, # type: ignore model: Optional[str] = None, @@ -34,12 +52,21 @@ async def create_chat_completion( if model is None: raise ValueError("Model cannot be None") if max_tokens is not None and max_tokens > 8001: - raise ValueError(f"Max tokens cannot be more than 8001, but got {max_tokens}") + raise ValueError( + f"Max tokens cannot be more than 8001, but got {max_tokens}") + + # Get the provider from supported providers + ProviderClass = get_provider(llm_provider) + provider = ProviderClass( + model, + temperature, + max_tokens + ) # create response for _ in range(10): # maximum of 10 attempts - response = await send_chat_completion_request( - messages, model, temperature, max_tokens, stream, llm_provider, websocket + response = await provider.get_chat_response( + messages, stream, websocket ) return response @@ -47,58 +74,6 @@ async def create_chat_completion( raise RuntimeError("Failed to get response from OpenAI API") -import logging - - -async def send_chat_completion_request( - messages, model, temperature, max_tokens, stream, llm_provider, websocket=None -): - if not stream: - # Initializing the chat model - chat = ChatOpenAI( - model=model, - temperature=temperature, - max_tokens=max_tokens - ) - - # Getting output from the model chain using ainvoke for asynchronous invoking - output = await chat.ainvoke(messages) - - return output.content - - else: - return await stream_response( - model, messages, temperature, max_tokens, llm_provider, websocket - ) - - -async def stream_response(model, 
messages, temperature, max_tokens, llm_provider, websocket=None): - # Initializing the model - chat = ChatOpenAI( - model=model, - temperature=temperature, - max_tokens=max_tokens - ) - - paragraph = "" - response = "" - - # Streaming the response using the chain astream method from langchain - async for chunk in chat.astream(messages): - content = chunk.content - if content is not None: - response += content - paragraph += content - if "\n" in paragraph: - if websocket is not None: - await websocket.send_json({"type": "report", "output": paragraph}) - else: - print(f"{Fore.GREEN}{paragraph}{Style.RESET_ALL}") - paragraph = "" - - return response - - def choose_agent(smart_llm_model: str, llm_provider: str, task: str) -> dict: """Determines what server should be used Args: diff --git a/poetry.lock b/poetry.lock index c521871b4..66757a453 100644 --- a/poetry.lock +++ b/poetry.lock @@ -359,6 +359,17 @@ files = [ [package.dependencies] cffi = ">=1.0.0" +[[package]] +name = "cachetools" +version = "5.3.3" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, +] + [[package]] name = "certifi" version = "2023.11.17" @@ -901,6 +912,115 @@ files = [ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, ] +[[package]] +name = "google-ai-generativelanguage" +version = "0.4.0" +description = "Google Ai Generativelanguage API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-ai-generativelanguage-0.4.0.tar.gz", hash = "sha256:c8199066c08f74c4e91290778329bb9f357ba1ea5d6f82de2bc0d10552bf4f8c"}, + {file = "google_ai_generativelanguage-0.4.0-py3-none-any.whl", hash = "sha256:e4c425376c1ee26c78acbc49a24f735f90ebfa81bf1a06495fae509a2433232c"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "google-api-core" +version = "2.18.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-api-core-2.18.0.tar.gz", hash = "sha256:62d97417bfc674d6cef251e5c4d639a9655e00c45528c4364fbfebb478ce72a9"}, + {file = "google_api_core-2.18.0-py3-none-any.whl", hash = "sha256:5a63aa102e0049abe85b5b88cb9409234c1f70afcda21ce1e40b285b9629c1d6"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +grpcio = [ + {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +grpcio-status = [ + {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 
|| >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-auth" +version = "2.29.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"}, + {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-generativeai" +version = "0.4.1" +description = "Google Generative AI High level API client library and tools." +optional = false +python-versions = ">=3.9" +files = [ + {file = "google_generativeai-0.4.1-py3-none-any.whl", hash = "sha256:89be3c00c2e688108fccefc50f47f45fc9d37ecd53c1ade9d86b5d982919c24a"}, +] + +[package.dependencies] +google-ai-generativelanguage = "0.4.0" +google-api-core = "*" +google-auth = ">=2.15.0" +protobuf = "*" +pydantic = "*" +tqdm = "*" +typing-extensions = "*" + +[package.extras] +dev = ["Pillow", "absl-py", "black", "ipython", "nose2", "pandas", "pytype", "pyyaml"] + +[[package]] +name = "googleapis-common-protos" +version = "1.63.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis-common-protos-1.63.0.tar.gz", hash = "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e"}, + {file = "googleapis_common_protos-1.63.0-py2.py3-none-any.whl", hash = "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"}, +] + +[package.dependencies] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + [[package]] name = "greenlet" version = "3.0.0" @@ -976,6 +1096,88 @@ files = [ docs = ["Sphinx"] test = ["objgraph", "psutil"] +[[package]] +name = "grpcio" +version = "1.62.1" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e"}, + {file = "grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5"}, + {file = 
"grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70"}, + {file = "grpcio-1.62.1-cp310-cp310-win32.whl", hash = "sha256:4187201a53f8561c015bc745b81a1b2d278967b8de35f3399b84b0695e281d5f"}, + {file = "grpcio-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:844d1f3fb11bd1ed362d3fdc495d0770cfab75761836193af166fee113421d66"}, + {file = "grpcio-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:833379943d1728a005e44103f17ecd73d058d37d95783eb8f0b28ddc1f54d7b2"}, + {file = "grpcio-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:c7fcc6a32e7b7b58f5a7d27530669337a5d587d4066060bcb9dee7a8c833dfb7"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:fa7d28eb4d50b7cbe75bb8b45ed0da9a1dc5b219a0af59449676a29c2eed9698"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48f7135c3de2f298b833be8b4ae20cafe37091634e91f61f5a7eb3d61ec6f660"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71f11fd63365ade276c9d4a7b7df5c136f9030e3457107e1791b3737a9b9ed6a"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b49fd8fe9f9ac23b78437da94c54aa7e9996fbb220bac024a67469ce5d0825f"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:482ae2ae78679ba9ed5752099b32e5fe580443b4f798e1b71df412abf43375db"}, + {file = "grpcio-1.62.1-cp311-cp311-win32.whl", hash = "sha256:1faa02530b6c7426404372515fe5ddf66e199c2ee613f88f025c6f3bd816450c"}, + {file = "grpcio-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bd90b8c395f39bc82a5fb32a0173e220e3f401ff697840f4003e15b96d1befc"}, + {file = "grpcio-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b134d5d71b4e0837fff574c00e49176051a1c532d26c052a1e43231f252d813b"}, + {file = "grpcio-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d1f6c96573dc09d50dbcbd91dbf71d5cf97640c9427c32584010fbbd4c0e0037"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:359f821d4578f80f41909b9ee9b76fb249a21035a061a327f91c953493782c31"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a485f0c2010c696be269184bdb5ae72781344cb4e60db976c59d84dd6354fac9"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b50b09b4dc01767163d67e1532f948264167cd27f49e9377e3556c3cba1268e1"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3227c667dccbe38f2c4d943238b887bac588d97c104815aecc62d2fd976e014b"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3952b581eb121324853ce2b191dae08badb75cd493cb4e0243368aa9e61cfd41"}, + {file = "grpcio-1.62.1-cp312-cp312-win32.whl", hash = "sha256:83a17b303425104d6329c10eb34bba186ffa67161e63fa6cdae7776ff76df73f"}, + {file = "grpcio-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:6696ffe440333a19d8d128e88d440f91fb92c75a80ce4b44d55800e656a3ef1d"}, + {file = "grpcio-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:e3393b0823f938253370ebef033c9fd23d27f3eae8eb9a8f6264900c7ea3fb5a"}, + {file = 
"grpcio-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83e7ccb85a74beaeae2634f10eb858a0ed1a63081172649ff4261f929bacfd22"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:882020c87999d54667a284c7ddf065b359bd00251fcd70279ac486776dbf84ec"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a10383035e864f386fe096fed5c47d27a2bf7173c56a6e26cffaaa5a361addb1"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:960edebedc6b9ada1ef58e1c71156f28689978188cd8cff3b646b57288a927d9"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:23e2e04b83f347d0aadde0c9b616f4726c3d76db04b438fd3904b289a725267f"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:978121758711916d34fe57c1f75b79cdfc73952f1481bb9583399331682d36f7"}, + {file = "grpcio-1.62.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9084086190cc6d628f282e5615f987288b95457292e969b9205e45b442276407"}, + {file = "grpcio-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:22bccdd7b23c420a27fd28540fb5dcbc97dc6be105f7698cb0e7d7a420d0e362"}, + {file = "grpcio-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:8999bf1b57172dbc7c3e4bb3c732658e918f5c333b2942243f10d0d653953ba9"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:d9e52558b8b8c2f4ac05ac86344a7417ccdd2b460a59616de49eb6933b07a0bd"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1714e7bc935780bc3de1b3fcbc7674209adf5208ff825799d579ffd6cd0bd505"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8842ccbd8c0e253c1f189088228f9b433f7a93b7196b9e5b6f87dba393f5d5d"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1f1e7b36bdff50103af95a80923bf1853f6823dd62f2d2a2524b66ed74103e49"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bba97b8e8883a8038606480d6b6772289f4c907f6ba780fa1f7b7da7dfd76f06"}, + {file = "grpcio-1.62.1-cp38-cp38-win32.whl", hash = "sha256:a7f615270fe534548112a74e790cd9d4f5509d744dd718cd442bf016626c22e4"}, + {file = "grpcio-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:e6c8c8693df718c5ecbc7babb12c69a4e3677fd11de8886f05ab22d4e6b1c43b"}, + {file = "grpcio-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:73db2dc1b201d20ab7083e7041946910bb991e7e9761a0394bbc3c2632326483"}, + {file = "grpcio-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:407b26b7f7bbd4f4751dbc9767a1f0716f9fe72d3d7e96bb3ccfc4aace07c8de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f8de7c8cef9261a2d0a62edf2ccea3d741a523c6b8a6477a340a1f2e417658de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd5c8a1af40ec305d001c60236308a67e25419003e9bb3ebfab5695a8d0b369"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0477cb31da67846a33b1a75c611f88bfbcd427fe17701b6317aefceee1b96f"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:60dcd824df166ba266ee0cfaf35a31406cd16ef602b49f5d4dfb21f014b0dedd"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:973c49086cabab773525f6077f95e5a993bfc03ba8fc32e32f2c279497780585"}, + {file = "grpcio-1.62.1-cp39-cp39-win32.whl", hash = "sha256:12859468e8918d3bd243d213cd6fd6ab07208195dc140763c00dfe901ce1e1b4"}, + {file = 
"grpcio-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7209117bbeebdfa5d898205cc55153a51285757902dd73c47de498ad4d11332"}, + {file = "grpcio-1.62.1.tar.gz", hash = "sha256:6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.62.1)"] + +[[package]] +name = "grpcio-status" +version = "1.62.1" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.6" +files = [ + {file = "grpcio-status-1.62.1.tar.gz", hash = "sha256:3431c8abbab0054912c41df5c72f03ddf3b7a67be8a287bb3c18a3456f96ff77"}, + {file = "grpcio_status-1.62.1-py3-none-any.whl", hash = "sha256:af0c3ab85da31669f21749e8d53d669c061ebc6ce5637be49a46edcb7aa8ab17"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.62.1" +protobuf = ">=4.21.6" + [[package]] name = "h11" version = "0.14.0" @@ -1233,6 +1435,24 @@ tenacity = ">=8.1.0,<9.0.0" [package.extras] extended-testing = ["jinja2 (>=3,<4)"] +[[package]] +name = "langchain-google-genai" +version = "0.0.11" +description = "An integration package connecting Google's genai package and LangChain" +optional = false +python-versions = ">=3.9,<4.0" +files = [ + {file = "langchain_google_genai-0.0.11-py3-none-any.whl", hash = "sha256:7e42900bf6850f58a6f9088d885f450ca9a2653c17cee5c19f3599ac17a285bd"}, + {file = "langchain_google_genai-0.0.11.tar.gz", hash = "sha256:378b080e90d7ea1d6164b65847c5b1053346bb0781e629de9c2a3e4bb725317a"}, +] + +[package.dependencies] +google-generativeai = ">=0.4.1,<0.5.0" +langchain-core = ">=0.1,<0.2" + +[package.extras] +images = ["pillow (>=10.1.0,<11.0.0)"] + [[package]] name = "langchain-openai" version = "0.1.0" @@ -1934,6 +2154,68 @@ files = [ greenlet = "3.0.0" pyee = "11.0.1" +[[package]] +name = "proto-plus" +version = "1.23.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "proto-plus-1.23.0.tar.gz", hash = "sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2"}, + {file = "proto_plus-1.23.0-py3-none-any.whl", hash = "sha256:a829c79e619e1cf632de091013a4173deed13a55f326ef84f05af6f50ff4c82c"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<5.0.0dev" + +[package.extras] +testing = ["google-api-core[grpc] (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "4.25.3" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, + {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, + {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, + {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, + {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, +] + +[[package]] +name = "pyasn1" +version = "0.5.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, + {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.3.0" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, + {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.6.0" + [[package]] name = "pycparser" version = "2.21" @@ -2451,6 +2733,20 @@ files = [ [package.dependencies] requests = ">=1.0.0" +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = 
"sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + [[package]] name = "selenium" version = "4.15.2" @@ -3121,4 +3417,4 @@ test = ["pytest"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<4.0" -content-hash = "0d3c10c2585ae58471fb426a20ad7995ccffa41665ad50a8130acbb5616db599" +content-hash = "c08204555d477300f23f56ba27cc329a39ab331430b67bdd41843c5d89afbbc4" diff --git a/pyproject.toml b/pyproject.toml index e7018ae77..bc784dd12 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ mistune = "^3.0.2" htmldocx = "^0.0.6" python-docx = "^1.1.0" langchain-openai = "^0.1.0" +langchain-google-genai = "^0.0.11" [build-system] diff --git a/requirements.txt b/requirements.txt index a0bde8fdc..88ec9a826 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,4 +28,5 @@ SQLAlchemy langchain-openai mistune python-docx -htmldocx \ No newline at end of file +htmldocx +langchain-google-genai \ No newline at end of file