Skip to content
This repository has been archived by the owner on Jul 31, 2024. It is now read-only.

Commit

Permalink
Add support for tone parameter (#14)
Browse files Browse the repository at this point in the history
* Add support for tone parameter

* Add tests for tone parameter

* Update README
  • Loading branch information
vsakkas committed Nov 22, 2023
1 parent 40f6ae7 commit 0792e73
Show file tree
Hide file tree
Showing 5 changed files with 155 additions and 27 deletions.
24 changes: 23 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ Python Client for Bard, a Chat Based AI tool by Google.
## Features

- Connect to Bard, Google's AI-powered personal assistant.
- Ask questions and have a continuous conversation.
- Ask questions and have a conversation in various tones.
- Use asyncio for efficient and non-blocking I/O operations.

## Requirements
Expand Down Expand Up @@ -135,6 +135,27 @@ async with BardClient() as bard:
print(response)
```


### Tone

You can set the tone when having a conversation with Bard:

```python
async with BardClient() as bard:
_ = await bard.ask("When was Bard released?")

response = await bard.ask("When was Bard released?", tone="Professional")
print(response)
```

The available options for the `tone` parameter are:
- `Professional`
- `Casual`

> [!NOTE]
> It is recommended to use the `tone` parameter on subsequent prompts rather than on the first one, since this feature is typically used to adjust the tone of the previous response rather than to set the tone for the entire conversation.

### Exceptions

When something goes wrong, this library might throw one of the following exceptions:
Expand All @@ -143,6 +164,7 @@ When something goes wrong, Sydney.py might throw one of the following exceptions
|-------------------------------|-------------------------------------------|--------------------------|
| `CreateConversationException` | Failed to create conversation | Retry or use new cookies |
| `AskException` | Failed to get response from Bard | Retry or use new cookies |
| `NoResponseException` | Received an empty response from Bard | Wait and retry |

*For more detailed documentation and options, please refer to the code docstrings.*

Expand Down
75 changes: 49 additions & 26 deletions bard/bard.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,12 @@
from aiohttp import ClientSession

from bard.constants import BARD_STREAM_GENERATE_URL, BARD_URL, BARD_VERSION, HEADERS
from bard.exceptions import AskException, CreateConversationException
from bard.enums import ConversationTone
from bard.exceptions import (
AskException,
CreateConversationException,
NoResponseException,
)
from bard.utils import double_json_stringify, random_digit_as_string


Expand Down Expand Up @@ -58,7 +63,12 @@ def _build_ask_parameters(self) -> dict:
"rt": "c",
}

def _build_ask_arguments(self, prompt: str) -> dict:
def _build_ask_arguments(self, prompt: str, tone: str | None) -> dict:
conversation_arguments = None
if tone:
tone_value = getattr(ConversationTone, tone.upper()).value
conversation_arguments = [0, [tone_value], None, None, None, None, []]

request_data = [
[prompt, 0, None, [], None, None, 0],
[""], # TODO: Support language codes, like "en"
Expand All @@ -72,7 +82,7 @@ def _build_ask_arguments(self, prompt: str) -> dict:
],
"", # TODO: Find what this is
"", # TODO: Find what this is
None,
conversation_arguments,
[1],
1,
[],
Expand All @@ -86,6 +96,34 @@ def _build_ask_arguments(self, prompt: str) -> dict:
"at": self.snlm0e,
}

async def _ask(self, prompt: str, tone: str | None = None) -> str | None:
    """
    Send a prompt to Bard and return the raw answer text.

    Parameters
    ----------
    prompt: str
        The prompt to send to Bard.
    tone: str | None
        Optional tone name, forwarded to the request-arguments builder.

    Returns
    -------
    str | None
        The answer text, or None when the response payload is empty.

    Raises
    ------
    AskException
        If the HTTP request does not return status 200.
    """
    parameters = self._build_ask_parameters()
    arguments = self._build_ask_arguments(prompt, tone)

    session = await self._get_session()

    async with session.post(
        BARD_STREAM_GENERATE_URL, params=parameters, data=arguments
    ) as response:
        if response.status != 200:
            raise AskException(
                f"Failed to get response, received status: {response.status}"
            )

        response_text = await response.text()
        # The body is line-delimited; the 4th line carries the JSON payload
        # (presumably Google's batchexecute framing — TODO confirm).
        response_data = json.loads(response_text.splitlines()[3])
        # No actual response in the returned data.
        if not response_data or not response_data[0][2]:
            return None

        # The element at [0][2] is itself a JSON-encoded message.
        message = json.loads(response_data[0][2])

        # Persist conversation state so follow-up prompts continue the thread.
        self.conversation_id = message[1][0]
        self.response_id = message[1][1]
        self.choice_id = message[4][0][0]

        # Answer text, per Bard's undocumented response layout.
        return message[4][0][1][0]

async def start_conversation(self) -> None:
"""
Connect to Bard and create a new conversation.
Expand All @@ -109,43 +147,28 @@ async def start_conversation(self) -> None:

self.snlm0e = snlm0e_dict.group("value")

async def ask(self, prompt: str) -> str:
async def ask(self, prompt: str, tone: str | None = None) -> str:
"""
Send a prompt to Bard and return the answer.
Parameters
----------
prompt: str
The prompt that needs to be sent to Bard.
tone: str
The tone that Bard will use in the next response. If no value is
given, it will use a default tone.
Returns
-------
str
The response from Bard.
"""
parameters = self._build_ask_parameters()
arguments = self._build_ask_arguments(prompt)

session = await self._get_session()

async with session.post(
BARD_STREAM_GENERATE_URL, params=parameters, data=arguments
) as response:
if response.status != 200:
raise AskException(
f"Failed to get response, received status: {response.status}"
)

response_text = await response.text()
response_data = json.loads(response_text.splitlines()[3])

message = json.loads(response_data[0][2])
response = await self._ask(prompt=prompt, tone=tone)
if not response:
raise NoResponseException("No response was returned")

self.conversation_id = message[1][0]
self.response_id = message[1][1]
self.choice_id = message[4][0][0]

return message[4][0][1][0]
return response

async def reset_conversation(self) -> None:
"""
Expand Down
16 changes: 16 additions & 0 deletions bard/enums.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from enum import Enum


class ConversationTone(Enum):
    """
    Bard conversation tones. Supported options are:
    - `Default`
    - `Casual`
    - `Simple`
    - `Professional`

    The member values are the integer codes embedded in the ask request's
    conversation arguments (see `_build_ask_arguments`).
    """

    # NOTE(review): codes are non-contiguous (1 and 3 unused) — presumably
    # reserved by Bard's wire protocol; confirm against the API.
    DEFAULT = 0
    CASUAL = 2
    SIMPLE = 4
    PROFESSIONAL = 5
4 changes: 4 additions & 0 deletions bard/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,7 @@ class CreateConversationException(Exception):

class AskException(Exception):
    """Raised when a request to Bard fails to produce a response."""


class NoResponseException(Exception):
    """Raised when Bard returns an empty response."""
63 changes: 63 additions & 0 deletions tests/test_ask.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,69 @@ async def test_ask() -> bool:
assert False, f"Unexpected response: {response}, match score: {score}"


@pytest.mark.asyncio
async def test_ask_casual() -> None:
    """Asking with tone="Casual" should yield a casual-style greeting."""
    expected_responses = [
        "Hey there! What's up?",
        "Hey there! What's up? What can I do for you today?",
        "Hey! What's up?",
        "Hey! What's up? How can I be of assistance today?",
        "Hey! What's up? What can I do for you today?",
    ]

    async with BardClient() as bard:
        # Prime the conversation first; the tone parameter is intended for
        # follow-up prompts rather than the opening one.
        _ = await bard.ask("Hello, Bard!")
        response = await bard.ask("Hello, Bard!", tone="Casual")

    # Compute the best fuzzy-match score so a failure reports the closest
    # candidate instead of whichever happened to be compared last.
    best_score = max(
        fuzz.token_sort_ratio(response, expected) for expected in expected_responses
    )
    # pytest tests should assert and return None, not return True/False.
    assert best_score >= 80, f"Unexpected response: {response}, match score: {best_score}"


@pytest.mark.asyncio
async def test_ask_professional() -> None:
    """Asking with tone="Professional" should yield a formal greeting."""
    expected_responses = [
        "Good day. How may I assist you today?",
        "Greetings and salutations! I am at your service. How may I be of assistance today?",
        "Greetings! How can I be of assistance to you today?",
        "Greetings! I am at your service, ready to assist with your inquiries and requests. Please feel free to ask me anything.",
        "Greetings! I am at your service. Please let me know how I can assist you today.",
        "Greetings! I am at your service. Please let me know how I can be of assistance.",
        "Greetings! Please let me know how I can assist you today.",
        "Greetings, and how may I assist you today?",
        "Greetings, esteemed user. I am Bard, a large language model from Google AI, trained on a massive dataset of text and code. I am at your service",
        "Greetings, esteemed user. I am at your service. Please let me know how I can assist you today.",
        "Greetings, how can I assist you today?",
        "Greetings. How can I be of assistance today?",
        "Greetings. How may I be of assistance today?",
        "Greetings. I am Bard, a large language model from Google AI. How may I be of assistance today?",
        "Greetings. I am at your service. How may I assist you today?",
        "Greetings. I am at your service. Please let me know how I can assist you today.",
        "Greetings. I am at your service. Please let me know how I can be of assistance today.",
        "Greetings. Please let me know how I can assist you today.",
        "Greetings. Please let me know how I can be of assistance to you today.",
        "Greetings. Please let me know how I can be of assistance.",
    ]

    async with BardClient() as bard:
        # Prime the conversation first; the tone parameter is intended for
        # follow-up prompts rather than the opening one.
        _ = await bard.ask("Hello, Bard!")
        response = await bard.ask("Hello, Bard!", tone="Professional")

    # Compute the best fuzzy-match score so a failure reports the closest
    # candidate instead of whichever happened to be compared last.
    best_score = max(
        fuzz.token_sort_ratio(response, expected) for expected in expected_responses
    )
    # pytest tests should assert and return None, not return True/False.
    assert best_score >= 80, f"Unexpected response: {response}, match score: {best_score}"


@pytest.mark.asyncio
async def test_ask_multiple_prompts() -> None:
async with BardClient() as bard:
Expand Down

0 comments on commit 0792e73

Please sign in to comment.