Skip to content

Commit

Permalink
Minor fixes of documentation and prompt (#174)
Browse files Browse the repository at this point in the history
  • Loading branch information
zechengz committed Jun 19, 2023
1 parent c30b4aa commit 8020e20
Show file tree
Hide file tree
Showing 7 changed files with 52 additions and 40 deletions.
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -415,3 +415,8 @@ tags

# Camel
logs/

# Data
datasets_test/
evaluation_data/
camel_data/
27 changes: 13 additions & 14 deletions camel/agents/role_playing.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ def __init__(
self.critic = None

def init_chat(self) -> Tuple[AssistantChatMessage, List[ChatMessage]]:
r"""Initializes the chat by resetting both the assistant and user
r"""Initializes the chat by resetting both of the assistant and user
agents, and sending the system messages again to the agents using
chat messages. Returns the assistant's introductory message and the
user's response messages.
Expand Down Expand Up @@ -218,13 +218,13 @@ def process_messages(
self,
messages: Sequence[ChatMessage],
) -> ChatMessage:
r"""Processes a list of chat messages, returning the processed message.
If multiple messages are provided and `with_critic_in_the_loop`
is `False`, raises a `ValueError`. If no messages are provided, also
raises a `ValueError`.
r"""Processes a sequence of chat messages, returning the processed
message. If multiple messages are provided and
`with_critic_in_the_loop` is `False`, raises a `ValueError`.
If no messages are provided, a `ValueError` will be raised.
Args:
msgs: A list of `ChatMessage`s to process.
messages: A sequence of `ChatMessage` objects to process.
Returns:
A single `ChatMessage` representing the processed message.
Expand All @@ -248,23 +248,22 @@ def step(
r"""Advances the conversation by taking a message from the assistant,
processing it using the user agent, and then processing the resulting
message using the assistant agent. Returns a tuple containing the
resulting assistant message, whether or not the assistant agent
terminated the conversation, and any additional assistant information,
as well as a tuple containing the resulting user message, whether or
not the user agent terminated the conversation, and any additional user
information.
resulting assistant message, whether the assistant agent terminated
the conversation, and any additional assistant information, as well as
a tuple containing the resulting user message, whether the user agent
terminated the conversation, and any additional user information.
Args:
assistant_msg: A `ChatMessage` representing the message from the
assistant.
Returns:
A tuple containing two ChatAgentResponse: the first struct contains
the resulting assistant message, whether or not the assistant agent
the resulting assistant message, whether the assistant agent
terminated the conversation, and any additional assistant
information; the second struct contains the resulting user message,
whether or not the user agent terminated the conversation, and
any additional user information.
whether the user agent terminated the conversation, and any
additional user information.
"""
user_response = self.user_agent.step(assistant_msg)
if user_response.terminated or user_response.msgs is None:
Expand Down
10 changes: 5 additions & 5 deletions camel/agents/task_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@


class TaskSpecifyAgent(ChatAgent):
r"""An agent that Specifies a given task prompt by prompting the user to
r"""An agent that specifies a given task prompt by prompting the user to
provide more details.
Attributes:
Expand Down Expand Up @@ -160,12 +160,12 @@ def step(
task_msg = UserChatMessage(role_name="Task Planner",
content=self.task_planner_prompt)
# sub_tasks_msgs, terminated, _
task_tesponse = super().step(task_msg)
task_response = super().step(task_msg)

if task_tesponse.msgs is None:
if task_response.msgs is None:
raise RuntimeError("Got None Subtasks messages.")
if task_tesponse.terminated:
if task_response.terminated:
raise RuntimeError("Task planning failed.")

sub_tasks_msg = task_tesponse.msgs[0]
sub_tasks_msg = task_response.msgs[0]
return TextPrompt(sub_tasks_msg.content)
8 changes: 8 additions & 0 deletions camel/agents/tool_agents/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,5 +28,13 @@ def __init__(self, name: str, description: str) -> None:
self.name = name
self.description = description

def reset(self) -> None:
r"""Resets the agent to its initial state."""
pass

def step(self) -> None:
r"""Performs a single step of the agent."""
pass

def __str__(self) -> str:
return f"{self.name}: {self.description}"
38 changes: 19 additions & 19 deletions camel/agents/tool_agents/hugging_face_tool_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,7 @@ def __init__(
"pip install huggingface_hub==0.14.1 transformers==4.29.0 diffusers accelerate datasets torch soundfile sentencepiece opencv-python"
)
self.agent = OpenAiAgent(*args, **kwargs)
self.name = name
self.remote = remote
self.description = f"""The `{self.name}` is a tool agent that can perform a variety of tasks including:
description = f"""The `{name}` is a tool agent that can perform a variety of tasks including:
- Document question answering: given a document (such as a PDF) in image format, answer a question on this document
- Text question answering: given a long text and a question, answer the question in the text
- Unconditional image captioning: Caption the image!
Expand All @@ -73,73 +71,75 @@ def __init__(
Single execution (step) mode, the single execution method is when using the step() method of the agent:
```
# Text to image
rivers_and_lakes_image = {self.name}.step("Draw me a picture of rivers and lakes.")
rivers_and_lakes_image = {name}.step("Draw me a picture of rivers and lakes.")
rivers_and_lakes_image.save("./rivers_and_lakes_image.png")
# Text to image -> Image transformation
sea_add_island_image = {self.name}.step("Draw me a picture of the sea then transform the picture to add an island")
sea_add_island_image = {name}.step("Draw me a picture of the sea then transform the picture to add an island")
sea_add_island_image.save("./sea_add_island_image.png")
# If you'd like to keep a state across executions or to pass non-text objects to the agent,
# you can do so by specifying variables that you would like the agent to use. For example,
# you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following:
picture = {self.name}.step("Generate a picture of rivers and lakes.")
picture = {name}.step("Generate a picture of rivers and lakes.")
picture.save("./picture.png")
updated_picture = {self.name}.step("Transform the image in `picture` to add an island to it.", picture=picture)
updated_picture = {name}.step("Transform the image in `picture` to add an island to it.", picture=picture)
updated_picture.save("./updated_picture.png")
capybara_sea_image = {self.name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
capybara_sea_image = {name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
capybara_sea_image.save("./capybara_sea_image.png")
# Document question answering
answer = {self.name}.step(
answer = {name}.step(
"In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?",
document=document,
)
print(answer)
# Text to image
boat_image = {self.name}.step("Generate an image of a boat in the water")
boat_image = {name}.step("Generate an image of a boat in the water")
boat_image.save("./boat_image.png")
# Unconditional image captioning
boat_image_caption = {self.name}.step("Can you caption the `boat_image`?", boat_image=boat_image)
boat_image_caption = {name}.step("Can you caption the `boat_image`?", boat_image=boat_image)
print(boat_image_caption)
# Text to image -> Unconditional image captioning -> Text to speech
boat_audio = {self.name}.step("Can you generate an image of a boat? Please read out loud the contents of the image afterwards")
boat_audio = {name}.step("Can you generate an image of a boat? Please read out loud the contents of the image afterwards")
# Text downloading
document = {self.name}.step("Download the text from http://hf.co")
document = {name}.step("Download the text from http://hf.co")
print(document)
# Text summarization
summary = {self.name}.step("Summarize the following text: `document`", document=document)
summary = {name}.step("Summarize the following text: `document`", document=document)
print(summary)
# Text downloading -> Text summarization -> Text to speech
audio = {self.name}.step("Read out loud the summary of http://hf.co")
audio = {name}.step("Read out loud the summary of http://hf.co")
```
Chat-based execution (chat), the agent also has a chat-based approach, using the chat() method:
```
# Clean the chat history
{self.name}.reset()
{name}.reset()
# Text to image
capybara_image = {self.name}.chat("Show me an image of a capybara")
capybara_image = {name}.chat("Show me an image of a capybara")
capybara_image.save("./capybara_image.png")
# Image transformation
transformed_capybara_image = {self.name}.chat("Transform the image so that it snows")
transformed_capybara_image = {name}.chat("Transform the image so that it snows")
transformed_capybara_image.save("./transformed_capybara_image.png")
# Image segmentation
segmented_transformed_capybara_image = {self.name}.chat("Show me a mask of the snowy capybaras")
segmented_transformed_capybara_image = {name}.chat("Show me a mask of the snowy capybaras")
segmented_transformed_capybara_image.save("./segmented_transformed_capybara_image.png")
```
"""
super(HuggingFaceToolAgent, self).__init__(name, description)
self.remote = remote

def reset(self) -> None:
r"""Resets the chat history of the agent."""
Expand Down
2 changes: 1 addition & 1 deletion camel/prompts/code.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ class CodePromptTemplateDict(TextPromptDict):
)

ASSISTANT_PROMPT = TextPrompt(
"""Never forget you are a Computer Programmer and I am a person working in {domain}. Never flip roles! Never instruct me!
"""Never forget you are a Computer Programmer and I am a person working in {domain}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task using {language} programming language.
Here is the task: {task}. Never forget our task!
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ pytest-cov

# Packages for huggingface tools
bs4
transformers["agent"]
transformers[agent]
diffusers==0.16.1
accelerate==0.19.0
datasets==2.12.0
Expand Down

0 comments on commit 8020e20

Please sign in to comment.