diff --git a/README.md b/README.md
index 0ef774a..c963df8 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ Python Client for Bard, a Chat Based AI tool by Google.
 ## Features
 
 - Connect to Bard, Google's AI-powered personal assistant.
-- Ask questions and have a continuous conversation.
+- Ask questions and have a conversation in various tones.
 - Use asyncio for efficient and non-blocking I/O operations.
 
 ## Requirements
@@ -135,6 +135,27 @@ async with BardClient() as bard:
     print(response)
 ```
 
+
+### Tone
+
+You can set the tone when having a conversation with Bard:
+
+```python
+async with BardClient() as bard:
+    _ = await bard.ask("When was Bard released?")
+
+    response = await bard.ask("When was Bard released?", tone="Professional")
+    print(response)
+```
+
+The available options for the `tone` parameter are:
+- `Professional`
+- `Casual`
+
+> [!NOTE]
+> It is recommended to use the `tone` parameter on subsequent prompts rather than on the first one, since this feature is typically used to adjust the previous response rather than to define the tone of the entire conversation.
+
+
 ### Exceptions
 
 When something goes wrong, Sydney.py might throw one of the following exceptions:
@@ -143,6 +164,7 @@ When something goes wrong, Sydney.py might throw one of the following exceptions
 |-------------------------------|--------------------------------------------|--------------------------|
 | `CreateConversationException` | Failed to create conversation              | Retry or use new cookies |
 | `AskException`                | Failed to get response from Bard           | Retry or use new cookies |
+| `NoResponseException`         | Received an empty response from Bard       | Wait and retry           |
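+
+For example, one way to handle these exceptions is to wait and retry. The sketch below is only illustrative: `ask_with_retry` is a hypothetical helper, and `BardClient` is assumed to be imported as in the snippets above:
+
+```python
+import asyncio
+
+from bard.exceptions import AskException, NoResponseException
+
+
+async def ask_with_retry(prompt: str, retries: int = 3) -> str:
+    for attempt in range(retries):
+        try:
+            async with BardClient() as bard:
+                return await bard.ask(prompt)
+        except (AskException, NoResponseException):
+            if attempt == retries - 1:
+                raise
+            # Wait a bit before retrying, as suggested in the table above.
+            await asyncio.sleep(2**attempt)
+
+
+# Usage (inside an event loop): response = await ask_with_retry("When was Bard released?")
+```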
 
 *For more detailed documentation and options, please refer to the code docstrings.*
 
diff --git a/bard/bard.py b/bard/bard.py
index 07de171..42bbabe 100644
--- a/bard/bard.py
+++ b/bard/bard.py
@@ -5,7 +5,12 @@ from aiohttp import ClientSession
 
 from bard.constants import BARD_STREAM_GENERATE_URL, BARD_URL, BARD_VERSION, HEADERS
-from bard.exceptions import AskException, CreateConversationException
+from bard.enums import ConversationTone
+from bard.exceptions import (
+    AskException,
+    CreateConversationException,
+    NoResponseException,
+)
 from bard.utils import double_json_stringify, random_digit_as_string
 
 
 class BardClient:
@@ -58,7 +63,12 @@ def _build_ask_parameters(self) -> dict:
             "rt": "c",
         }
 
-    def _build_ask_arguments(self, prompt: str) -> dict:
+    def _build_ask_arguments(self, prompt: str, tone: str | None) -> dict:
+        conversation_arguments = None
+        if tone:
+            tone_value = getattr(ConversationTone, tone.upper()).value
+            conversation_arguments = [0, [tone_value], None, None, None, None, []]
+
         request_data = [
             [prompt, 0, None, [], None, None, 0],
             [""],  # TODO: Support language codes, like "en"
@@ -72,7 +82,7 @@ def _build_ask_arguments(self, prompt: str) -> dict:
             ],
             "",  # TODO: Find what this is
             "",  # TODO: Find what this is
-            None,
+            conversation_arguments,
             [1],
             1,
             [],
@@ -86,6 +96,34 @@ def _build_ask_arguments(self, prompt: str) -> dict:
             "at": self.snlm0e,
         }
 
+    async def _ask(self, prompt: str, tone: str | None = None) -> str | None:
+        parameters = self._build_ask_parameters()
+        arguments = self._build_ask_arguments(prompt, tone)
+
+        session = await self._get_session()
+
+        async with session.post(
+            BARD_STREAM_GENERATE_URL, params=parameters, data=arguments
+        ) as response:
+            if response.status != 200:
+                raise AskException(
+                    f"Failed to get response, received status: {response.status}"
+                )
+
+            response_text = await response.text()
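+            # The reply body contains several lines; the JSON payload with the
+            # actual answer is expected on the fourth line.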
+            response_data = json.loads(response_text.splitlines()[3])
+            # No actual response in the returned data.
+            if not response_data or not response_data[0][2]:
+                return None
+
+            message = json.loads(response_data[0][2])
+
+            self.conversation_id = message[1][0]
+            self.response_id = message[1][1]
+            self.choice_id = message[4][0][0]
+
+            return message[4][0][1][0]
+
     async def start_conversation(self) -> None:
         """
         Connect to Bard and create a new conversation.
@@ -109,7 +147,7 @@ async def start_conversation(self) -> None:
 
         self.snlm0e = snlm0e_dict.group("value")
 
-    async def ask(self, prompt: str) -> str:
+    async def ask(self, prompt: str, tone: str | None = None) -> str:
         """
         Send a prompt to Bard and return the answer.
 
@@ -117,35 +155,20 @@ async def ask(self, prompt: str) -> str:
         ----------
         prompt: str
             The prompt that needs to be sent to Bard.
+        tone: str | None
+            The tone that Bard will use in the next response. If no value is
+            given, it will use a default tone.
 
         Returns
        -------
         str
             The response from Bard.
         """
-        parameters = self._build_ask_parameters()
-        arguments = self._build_ask_arguments(prompt)
-
-        session = await self._get_session()
-
-        async with session.post(
-            BARD_STREAM_GENERATE_URL, params=parameters, data=arguments
-        ) as response:
-            if response.status != 200:
-                raise AskException(
-                    f"Failed to get response, received status: {response.status}"
-                )
-
-            response_text = await response.text()
-            response_data = json.loads(response_text.splitlines()[3])
-
-            message = json.loads(response_data[0][2])
+        response = await self._ask(prompt=prompt, tone=tone)
+        if not response:
+            raise NoResponseException("No response was returned")
 
-            self.conversation_id = message[1][0]
-            self.response_id = message[1][1]
-            self.choice_id = message[4][0][0]
-
-            return message[4][0][1][0]
+        return response
 
     async def reset_conversation(self) -> None:
         """
diff --git a/bard/enums.py b/bard/enums.py
new file mode 100644
index 0000000..1fba418
--- /dev/null
+++ b/bard/enums.py
@@ -0,0 +1,16 @@
+from enum import Enum
+
+
+class ConversationTone(Enum):
+    """
+    Bard conversation tones. Supported options are:
+    - `Default`
+    - `Casual`
+    - `Simple`
+    - `Professional`
+    """
+
+    DEFAULT = 0
+    CASUAL = 2
+    SIMPLE = 4
+    PROFESSIONAL = 5
diff --git a/bard/exceptions.py b/bard/exceptions.py
index 6711629..69efd13 100644
--- a/bard/exceptions.py
+++ b/bard/exceptions.py
@@ -4,3 +4,7 @@ class CreateConversationException(Exception):
 
 class AskException(Exception):
     pass
+
+
+class NoResponseException(Exception):
+    pass
diff --git a/tests/test_ask.py b/tests/test_ask.py
index 3a42a7f..39caf1b 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -25,6 +25,69 @@ async def test_ask() -> bool:
         assert False, f"Unexpected response: {response}, match score: {score}"
 
 
+@pytest.mark.asyncio
+async def test_ask_casual() -> bool:
+    expected_responses = [
+        "Hey there! What's up?",
+        "Hey there! What's up? What can I do for you today?",
+        "Hey! What's up?",
+        "Hey! What's up? How can I be of assistance today?",
+        "Hey! What's up? What can I do for you today?",
+    ]
+
+    async with BardClient() as bard:
+        _ = await bard.ask("Hello, Bard!")
+
+        response = await bard.ask("Hello, Bard!", tone="Casual")
+
+        score = 0
+        for expected_response in expected_responses:
+            score = fuzz.token_sort_ratio(response, expected_response)
+            if score >= 80:
+                return True
+
+        assert False, f"Unexpected response: {response}, match score: {score}"
+
+
+@pytest.mark.asyncio
+async def test_ask_professional() -> bool:
+    expected_responses = [
+        "Good day. How may I assist you today?",
+        "Greetings and salutations! I am at your service. How may I be of assistance today?",
+        "Greetings! How can I be of assistance to you today?",
+        "Greetings! I am at your service, ready to assist with your inquiries and requests. Please feel free to ask me anything.",
+        "Greetings! I am at your service. Please let me know how I can assist you today.",
+        "Greetings! I am at your service. Please let me know how I can be of assistance.",
+        "Greetings! Please let me know how I can assist you today.",
+        "Greetings, and how may I assist you today?",
+        "Greetings, esteemed user. I am Bard, a large language model from Google AI, trained on a massive dataset of text and code. I am at your service",
+        "Greetings, esteemed user. I am at your service. Please let me know how I can assist you today.",
+        "Greetings, how can I assist you today?",
+        "Greetings. How can I be of assistance today?",
+        "Greetings. How may I be of assistance today?",
+        "Greetings. I am Bard, a large language model from Google AI. How may I be of assistance today?",
+        "Greetings. I am at your service. How may I assist you today?",
+        "Greetings. I am at your service. Please let me know how I can assist you today.",
+        "Greetings. I am at your service. Please let me know how I can be of assistance today.",
+        "Greetings. Please let me know how I can assist you today.",
+        "Greetings. Please let me know how I can be of assistance to you today.",
+        "Greetings. Please let me know how I can be of assistance.",
+    ]
+
+    async with BardClient() as bard:
+        _ = await bard.ask("Hello, Bard!")
+
+        response = await bard.ask("Hello, Bard!", tone="Professional")
+
+        score = 0
+        for expected_response in expected_responses:
+            score = fuzz.token_sort_ratio(response, expected_response)
+            if score >= 80:
+                return True
+
+        assert False, f"Unexpected response: {response}, match score: {score}"
+
+
 @pytest.mark.asyncio
 async def test_ask_multiple_prompts() -> None:
     async with BardClient() as bard: