diff --git a/apps/agents/agents.py b/apps/agents/agents.py
index c7555b4f4f..23c0be2604 100644
--- a/apps/agents/agents.py
+++ b/apps/agents/agents.py
@@ -29,7 +29,7 @@
 from apps.agents.text_utils import split_markdown_code
 from camel.agents import TaskSpecifyAgent
-from camel.messages import AssistantChatMessage
+from camel.messages import BaseMessage
 from camel.societies import RolePlaying
 
 REPO_ROOT = os.path.realpath(
@@ -43,17 +43,16 @@ class State:
     session: Optional[RolePlaying]
     max_messages: int
     chat: ChatBotHistory
-    saved_assistant_msg: Optional[AssistantChatMessage]
+    saved_assistant_msg: Optional[BaseMessage]
 
     @classmethod
     def empty(cls) -> 'State':
         return cls(None, 0, [], None)
 
     @staticmethod
-    def construct_inplace(
-            state: 'State', session: Optional[RolePlaying], max_messages: int,
-            chat: ChatBotHistory,
-            saved_assistant_msg: Optional[AssistantChatMessage]) -> None:
+    def construct_inplace(state: 'State', session: Optional[RolePlaying],
+                          max_messages: int, chat: ChatBotHistory,
+                          saved_assistant_msg: Optional[BaseMessage]) -> None:
         state.session = session
         state.max_messages = max_messages
         state.chat = chat
@@ -216,7 +215,7 @@ def role_playing_chat_init(state) -> \
 
     try:
         init_assistant_msg, _ = session.init_chat()
-        init_assistant_msg: AssistantChatMessage
+        init_assistant_msg: BaseMessage
     except (openai.error.RateLimitError, tenacity.RetryError,
             RuntimeError) as ex:
         print("OpenAI API exception 1 " + str(ex))
diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py
index 774833286b..0d166e7c1a 100644
--- a/camel/agents/chat_agent.py
+++ b/camel/agents/chat_agent.py
@@ -20,7 +20,7 @@
 from camel.agents import BaseAgent
 from camel.configs import ChatGPTConfig
-from camel.messages import ChatMessage, MessageType, SystemMessage
+from camel.messages import BaseMessage
 from camel.models import BaseModelBackend, ModelFactory
 from camel.typing import ModelType, RoleType
 from camel.utils import num_tokens_from_messages, openai_api_key_required
@@ -31,7 +31,7 @@ class ChatAgentResponse:
     r"""Response of a ChatAgent.
 
     Attributes:
-        msgs (List[ChatMessage]): A list of zero, one or several messages.
+        msgs (List[BaseMessage]): A list of zero, one or several messages.
             If the list is empty, there is some error in message generation.
             If the list has one message, this is normal mode.
             If the list has several messages, this is the critic mode.
@@ -39,7 +39,7 @@ class ChatAgentResponse:
             to terminate the chat session.
         info (Dict[str, Any]): Extra information about the chat message.
     """
-    msgs: List[ChatMessage]
+    msgs: List[BaseMessage]
     terminated: bool
     info: Dict[str, Any]
 
@@ -51,11 +51,32 @@ def msg(self):
         return self.msgs[0]
 
 
+@dataclass(frozen=True)
+class ChatRecord:
+    r"""A historical record of a message and the backend role it was sent under.
+
+    Attributes:
+        role_at_backend (str): Role of the message at the OpenAI backend,
+            one of `system`, `user`, or `assistant`.
+        message (BaseMessage): Message payload.
+    """
+    role_at_backend: str
+    message: BaseMessage
+
+    def to_openai_message(self):
+        r"""Converts the payload message to OpenAI-compatible format.
+
+        Returns:
+            OpenAIMessage: The OpenAI-compatible message.
+        """
+        return self.message.to_openai_message(self.role_at_backend)
+
+
 class ChatAgent(BaseAgent):
     r"""Class for managing conversations of CAMEL Chat Agents.
 
     Args:
-        system_message (SystemMessage): The system message for the chat agent.
+        system_message (BaseMessage): The system message for the chat agent.
         model (ModelType, optional): The LLM model to use for generating
             responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
         model_config (Any, optional): Configuration options for the LLM model.
@@ -69,14 +90,14 @@ class ChatAgent(BaseAgent):
 
     def __init__(
        self,
-        system_message: SystemMessage,
+        system_message: BaseMessage,
        model: Optional[ModelType] = None,
        model_config: Optional[Any] = None,
        message_window_size: Optional[int] = None,
        output_language: Optional[str] = None,
    ) -> None:
-        self.system_message: SystemMessage = system_message
+        self.system_message: BaseMessage = system_message
         self.role_name: str = system_message.role_name
         self.role_type: RoleType = system_message.role_type
         self.output_language: Optional[str] = output_language
@@ -93,20 +114,21 @@ def __init__(
         self.model_token_limit: int = self.model_backend.token_limit
         self.terminated: bool = False
+        self.stored_messages: List[ChatRecord]
         self.init_messages()
 
-    def reset(self) -> List[MessageType]:
+    def reset(self) -> List[ChatRecord]:
         r"""Resets the :obj:`ChatAgent` to its initial state and returns the
         stored messages.
 
         Returns:
-            List[MessageType]: The stored messages.
+            List[ChatRecord]: The stored messages.
         """
         self.terminated = False
         self.init_messages()
         return self.stored_messages
 
-    def set_output_language(self, output_language: str) -> SystemMessage:
+    def set_output_language(self, output_language: str) -> BaseMessage:
         r"""Sets the output language for the system message. This method
         updates the output language for the system message. The output
         language determines the language in which the output text should be
@@ -116,7 +138,7 @@ def set_output_language(self, output_language: str) -> SystemMessage:
             output_language (str): The desired output language.
 
         Returns:
-            SystemMessage: The updated system message object.
+            BaseMessage: The updated system message object.
         """
         self.output_language = output_language
         content = (self.system_message.content +
@@ -156,32 +178,48 @@ def init_messages(self) -> None:
         r"""Initializes the stored messages list with the initial system
         message.
         """
-        self.stored_messages: List[MessageType] = [self.system_message]
+        self.stored_messages = [ChatRecord('system', self.system_message)]
 
-    def update_messages(self, message: ChatMessage) -> List[MessageType]:
+    def update_messages(self, role: str,
+                        message: BaseMessage) -> List[ChatRecord]:
         r"""Updates the stored messages list with a new message.
 
         Args:
-            message (ChatMessage): The new message to add to the stored
+            role (str): Role of the message at the backend, one of
+                `system`, `user`, or `assistant`.
+            message (BaseMessage): The new message to add to the stored
                 messages.
 
         Returns:
-            List[ChatMessage]: The updated stored messages.
+            List[ChatRecord]: The updated stored messages.
         """
-        self.stored_messages.append(message)
+        if role not in {'system', 'user', 'assistant'}:
+            raise ValueError(f"Unsupported role {role}")
+        self.stored_messages.append(ChatRecord(role, message))
         return self.stored_messages
 
+    def submit_message(self, message: BaseMessage) -> None:
+        r"""Submits an externally provided message as if it were a response
+        from the backend chat LLM. Currently the critic's choice is
+        submitted with this method.
+
+        Args:
+            message (BaseMessage): An external message to be added as an
+                assistant response.
+        """
+        self.stored_messages.append(ChatRecord('assistant', message))
+
     @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
     @openai_api_key_required
     def step(
         self,
-        input_message: ChatMessage,
+        input_message: BaseMessage,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating
         a response to the input message.
 
         Args:
-            input_message (ChatMessage): The input message to the agent.
+            input_message (BaseMessage): The input message to the agent.
+                Its role at the backend is always set to `user`, regardless
+                of whether it came from a user or an assistant, because any
+                incoming message is external to this agent.
@@ -192,16 +228,15 @@ def step(
                 the chat session has terminated, and information about the
                 chat session.
         """
-        msg_user_at_backend = input_message.set_user_role_at_backend()
-        messages = self.update_messages(msg_user_at_backend)
+        messages = self.update_messages('user', input_message)
         if self.message_window_size is not None and len(
                 messages) > self.message_window_size:
-            messages = [self.system_message
+            messages = [ChatRecord('system', self.system_message)
                         ] + messages[-self.message_window_size:]
-        openai_messages = [message.to_openai_message() for message in messages]
+        openai_messages = [record.to_openai_message() for record in messages]
         num_tokens = num_tokens_from_messages(openai_messages, self.model)
 
-        output_messages: Optional[List[ChatMessage]]
+        output_messages: Optional[List[BaseMessage]]
         info: Dict[str, Any]
 
         if num_tokens < self.model_token_limit:
@@ -209,8 +244,9 @@
             if not isinstance(response, dict):
                 raise RuntimeError("OpenAI returned unexpected struct")
             output_messages = [
-                ChatMessage(role_name=self.role_name, role_type=self.role_type,
-                            meta_dict=dict(), **dict(choice["message"]))
+                BaseMessage(role_name=self.role_name, role_type=self.role_type,
+                            meta_dict=dict(),
+                            content=choice["message"]['content'])
                 for choice in response["choices"]
             ]
             info = self.get_info(
diff --git a/camel/agents/critic_agent.py b/camel/agents/critic_agent.py
index c1ef819855..2941f698bb 100644
--- a/camel/agents/critic_agent.py
+++ b/camel/agents/critic_agent.py
@@ -19,7 +19,7 @@
 from colorama import Fore
 
 from camel.agents import ChatAgent
-from camel.messages import ChatMessage, SystemMessage
+from camel.messages import BaseMessage
 from camel.typing import ModelType
 from camel.utils import get_first_int, print_text_animated
 
@@ -28,7 +28,7 @@ class CriticAgent(ChatAgent):
     r"""A class for the critic agent that assists in selecting an option.
 
     Args:
-        system_message (SystemMessage): The system message for the critic
+        system_message (BaseMessage): The system message for the critic
             agent.
         model (ModelType, optional): The LLM model to use for generating
             responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
@@ -46,7 +46,7 @@ class CriticAgent(ChatAgent):
 
     def __init__(
         self,
-        system_message: SystemMessage,
+        system_message: BaseMessage,
         model: ModelType = ModelType.GPT_3_5_TURBO,
         model_config: Optional[Any] = None,
         message_window_size: int = 6,
@@ -61,11 +61,11 @@ def __init__(
         self.verbose = verbose
         self.logger_color = logger_color
 
-    def flatten_options(self, messages: Sequence[ChatMessage]) -> str:
+    def flatten_options(self, messages: Sequence[BaseMessage]) -> str:
         r"""Flattens the options to the critic.
 
         Args:
-            messages (Sequence[ChatMessage]): A list of `ChatMessage` objects.
+            messages (Sequence[BaseMessage]): A list of `BaseMessage` objects.
 
         Returns:
             str: A string containing the flattened options to the critic.
@@ -83,11 +83,11 @@ def flatten_options(self, messages: Sequence[BaseMessage]) -> str:
                   "and then your explanation and comparison: ")
         return flatten_options + format
 
-    def get_option(self, input_message: ChatMessage) -> str:
+    def get_option(self, input_message: BaseMessage) -> str:
         r"""Gets the option selected by the critic.
Args: - input_message (ChatMessage): A `ChatMessage` object representing + input_message (BaseMessage): A `BaseMessage` object representing the input message. Returns: @@ -104,8 +104,8 @@ def get_option(self, input_message: ChatMessage) -> str: if critic_response.terminated: raise RuntimeError("Critic step failed.") - critic_msg = critic_response.msgs[0] - self.update_messages(critic_msg) + critic_msg = critic_response.msg + self.update_messages('assistant', critic_msg) if self.verbose: print_text_animated(self.logger_color + "\n> Critic response: " f"\x1b[3m{critic_msg.content}\x1b[0m\n") @@ -114,11 +114,10 @@ def get_option(self, input_message: ChatMessage) -> str: if choice in self.options_dict: return self.options_dict[choice] else: - input_message = ChatMessage( + input_message = BaseMessage( role_name=input_message.role_name, role_type=input_message.role_type, meta_dict=input_message.meta_dict, - role=input_message.role, content="> Invalid choice. Please choose again.\n" + msg_content, ) @@ -128,11 +127,11 @@ def get_option(self, input_message: ChatMessage) -> str: "Returning a random option.") return random.choice(list(self.options_dict.values())) - def parse_critic(self, critic_msg: ChatMessage) -> Optional[str]: + def parse_critic(self, critic_msg: BaseMessage) -> Optional[str]: r"""Parses the critic's message and extracts the choice. Args: - critic_msg (ChatMessage): A `ChatMessage` object representing the + critic_msg (BaseMessage): A `BaseMessage` object representing the critic's response. Returns: @@ -142,22 +141,21 @@ def parse_critic(self, critic_msg: ChatMessage) -> Optional[str]: choice = str(get_first_int(critic_msg.content)) return choice - def step(self, messages: Sequence[ChatMessage]) -> ChatMessage: + def step(self, messages: Sequence[BaseMessage]) -> BaseMessage: r"""Performs one step of the conversation by flattening options to the critic, getting the option, and parsing the choice. Args: - messages (Sequence[ChatMessage]): A list of ChatMessage objects. + messages (Sequence[BaseMessage]): A list of BaseMessage objects. Returns: - ChatMessage: A `ChatMessage` object representing the critic's + BaseMessage: A `BaseMessage` object representing the critic's choice. """ - meta_chat_message = ChatMessage( + meta_chat_message = BaseMessage( role_name=messages[0].role_name, role_type=messages[0].role_type, meta_dict=messages[0].meta_dict, - role=messages[0].role, content="", ) diff --git a/camel/agents/embodied_agent.py b/camel/agents/embodied_agent.py index a9bf44872d..47ab0674a2 100644 --- a/camel/agents/embodied_agent.py +++ b/camel/agents/embodied_agent.py @@ -16,7 +16,7 @@ from colorama import Fore from camel.agents import BaseToolAgent, ChatAgent, HuggingFaceToolAgent -from camel.messages import ChatMessage, SystemMessage +from camel.messages import BaseMessage from camel.typing import ModelType from camel.utils import print_text_animated @@ -25,7 +25,7 @@ class EmbodiedAgent(ChatAgent): r"""Class for managing conversations of CAMEL Embodied Agents. Args: - system_message (SystemMessage): The system message for the chat agent. + system_message (BaseMessage): The system message for the chat agent. model (ModelType, optional): The LLM model to use for generating responses. (default :obj:`ModelType.GPT_4`) model_config (Any, optional): Configuration options for the LLM model. 
@@ -42,7 +42,7 @@ class EmbodiedAgent(ChatAgent): def __init__( self, - system_message: SystemMessage, + system_message: BaseMessage, model: ModelType = ModelType.GPT_4, model_config: Optional[Any] = None, message_window_size: Optional[int] = None, @@ -79,15 +79,15 @@ def get_action_space_prompt(self) -> str: def step( self, - input_message: ChatMessage, - ) -> Tuple[ChatMessage, bool, Dict[str, Any]]: + input_message: BaseMessage, + ) -> Tuple[BaseMessage, bool, Dict[str, Any]]: r"""Performs a step in the conversation. Args: - input_message (ChatMessage): The input message. + input_message (BaseMessage): The input message. Returns: - Tuple[ChatMessage, bool, Dict[str, Any]]: A tuple + Tuple[BaseMessage, bool, Dict[str, Any]]: A tuple containing the output messages, termination status, and additional information. """ @@ -126,7 +126,6 @@ def step( # TODO: Handle errors content = input_message.content + (Fore.RESET + f"\n> Embodied Actions:\n{content}") - message = ChatMessage(input_message.role_name, input_message.role_type, - input_message.meta_dict, input_message.role, - content) + message = BaseMessage(input_message.role_name, input_message.role_type, + input_message.meta_dict, content) return message, response.terminated, response.info diff --git a/camel/agents/task_agent.py b/camel/agents/task_agent.py index 2a3a6369a0..41f013ac73 100644 --- a/camel/agents/task_agent.py +++ b/camel/agents/task_agent.py @@ -15,7 +15,7 @@ from camel.agents import ChatAgent from camel.configs import ChatGPTConfig -from camel.messages import SystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator, TextPrompt from camel.typing import ModelType, RoleType, TaskType @@ -65,9 +65,10 @@ def __init__( model_config = model_config or ChatGPTConfig(temperature=1.0) - system_message = SystemMessage( + system_message = BaseMessage( role_name="Task Specifier", role_type=RoleType.ASSISTANT, + meta_dict=None, content="You can make a task more specific.", ) @@ -99,8 +100,8 @@ def step( self.task_specify_prompt = (self.task_specify_prompt.format( **meta_dict)) - task_msg = UserChatMessage(role_name="Task Specifier", - content=self.task_specify_prompt) + task_msg = BaseMessage.make_user_message( + role_name="Task Specifier", content=self.task_specify_prompt) specifier_response = super().step(task_msg) if (specifier_response.msgs is None or len(specifier_response.msgs) == 0): @@ -140,9 +141,10 @@ def __init__( self.task_planner_prompt = TextPrompt( "Divide this task into subtasks: {task}. Be concise.") - system_message = SystemMessage( + system_message = BaseMessage( role_name="Task Planner", role_type=RoleType.ASSISTANT, + meta_dict=None, content="You are a helpful task planner.", ) @@ -167,8 +169,8 @@ def step( self.task_planner_prompt = self.task_planner_prompt.format( task=task_prompt) - task_msg = UserChatMessage(role_name="Task Planner", - content=self.task_planner_prompt) + task_msg = BaseMessage.make_user_message( + role_name="Task Planner", content=self.task_planner_prompt) # sub_tasks_msgs, terminated, _ task_response = super().step(task_msg) diff --git a/camel/generators.py b/camel/generators.py index cc9d6ec916..2f94375b05 100644 --- a/camel/generators.py +++ b/camel/generators.py @@ -13,7 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== from typing import Dict, Generator, List, Optional, Set, Tuple -from camel.messages import SystemMessage, SystemMessageType +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator, TextPrompt from camel.typing import RoleType, TaskType @@ -93,7 +93,7 @@ def from_dict( self, meta_dict: Dict[str, str], role_tuple: Tuple[str, RoleType] = ("", RoleType.DEFAULT), - ) -> SystemMessageType: + ) -> BaseMessage: r"""Generates a system message from a dictionary. Args: @@ -103,20 +103,20 @@ def from_dict( the role name and role type. (default: ("", RoleType.DEFAULT)) Returns: - SystemMessageType: The generated system message. + BaseMessage: The generated system message. """ self.validate_meta_dict_keys(meta_dict) role_name, role_type = role_tuple sys_prompt = self.sys_prompts[role_type] sys_prompt = sys_prompt.format(**meta_dict) - return SystemMessage(role_name=role_name, role_type=role_type, - meta_dict=meta_dict, content=sys_prompt) + return BaseMessage(role_name=role_name, role_type=role_type, + meta_dict=meta_dict, content=sys_prompt) def from_dicts( self, meta_dicts: List[Dict[str, str]], role_tuples: List[Tuple[str, RoleType]], - ) -> List[SystemMessageType]: + ) -> List[BaseMessage]: r"""Generates a list of system messages from a list of dictionaries. Args: @@ -126,7 +126,7 @@ def from_dicts( containing the role name and role type for each system message. Returns: - List[SystemMessageType]: A list of generated system messages. + List[BaseMessage]: A list of generated system messages. Raises: ValueError: If the number of meta_dicts and role_tuples are diff --git a/camel/human.py b/camel/human.py index 07321e35ed..fd7ffdfdf6 100644 --- a/camel/human.py +++ b/camel/human.py @@ -15,7 +15,7 @@ from colorama import Fore -from camel.messages import ChatMessage +from camel.messages import BaseMessage from camel.utils import print_text_animated @@ -46,11 +46,11 @@ def __init__(self, name: str = "Kill Switch Engineer", self.kill_button = "Stop!!!" self.options_dict: Dict[str, str] = dict() - def display_options(self, messages: Sequence[ChatMessage]) -> None: + def display_options(self, messages: Sequence[BaseMessage]) -> None: r"""Displays the options to the user. Args: - messages (Sequence[ChatMessage]): A list of `ChatMessage` objects. + messages (Sequence[BaseMessage]): A list of `BaseMessage` objects. Returns: None @@ -87,15 +87,15 @@ def get_input(self) -> str: return human_input def parse_input(self, human_input: str, - meta_chat_message: ChatMessage) -> ChatMessage: - r"""Parses the user's input and returns a `ChatMessage` object. + meta_chat_message: BaseMessage) -> BaseMessage: + r"""Parses the user's input and returns a `BaseMessage` object. Args: human_input (str): The user's input. - meta_chat_message (ChatMessage): A `ChatMessage` object. + meta_chat_message (BaseMessage): A `BaseMessage` object. Returns: - ChatMessage: A `ChatMessage` object. + BaseMessage: A `BaseMessage` object. """ if self.options_dict[human_input] == self.input_button: meta_chat_message.content = input(self.logger_color + @@ -107,21 +107,20 @@ def parse_input(self, human_input: str, meta_chat_message.content = self.options_dict[human_input] return meta_chat_message - def step(self, messages: Sequence[ChatMessage]) -> ChatMessage: + def step(self, messages: Sequence[BaseMessage]) -> BaseMessage: r"""Performs one step of the conversation by displaying options to the user, getting their input, and parsing their choice. 
 
         Args:
-            messages (Sequence[ChatMessage]): A list of ChatMessage objects.
+            messages (Sequence[BaseMessage]): A list of BaseMessage objects.
 
         Returns:
-            ChatMessage: A `ChatMessage` object representing the user's choice.
+            BaseMessage: A `BaseMessage` object representing the user's choice.
         """
-        meta_chat_message = ChatMessage(
+        meta_chat_message = BaseMessage(
             role_name=messages[0].role_name,
             role_type=messages[0].role_type,
             meta_dict=messages[0].meta_dict,
-            role=messages[0].role,
             content="",
         )
         self.display_options(messages)
diff --git a/camel/messages/__init__.py b/camel/messages/__init__.py
index 4fe78e3292..6339185201 100644
--- a/camel/messages/__init__.py
+++ b/camel/messages/__init__.py
@@ -20,19 +20,6 @@
 OpenAIMessage = Union[OpenAISystemMessage, OpenAIChatMessage]
 
 from .base import BaseMessage  # noqa: E402
-from .system_messages import (  # noqa: E402
-    SystemMessage, AssistantSystemMessage, UserSystemMessage,
-)
-from .chat_messages import (  # noqa: E402
-    ChatMessage, AssistantChatMessage, UserChatMessage,
-)
-
-MessageType = Union[BaseMessage, SystemMessage, AssistantSystemMessage,
-                    UserSystemMessage, ChatMessage, AssistantChatMessage,
-                    UserChatMessage]
-SystemMessageType = Union[SystemMessage, AssistantSystemMessage,
-                          UserSystemMessage]
-ChatMessageType = Union[ChatMessage, AssistantChatMessage, UserChatMessage]
 
 __all__ = [
     'OpenAISystemMessage',
@@ -41,13 +28,4 @@
     'OpenAIChatMessage',
     'OpenAIMessage',
     'BaseMessage',
-    'SystemMessage',
-    'AssistantSystemMessage',
-    'UserSystemMessage',
-    'ChatMessage',
-    'AssistantChatMessage',
-    'UserChatMessage',
-    'MessageType',
-    'SystemMessageType',
-    'ChatMessageType',
 ]
diff --git a/camel/messages/base.py b/camel/messages/base.py
index e28bd5a2fc..061c86bee6 100644
--- a/camel/messages/base.py
+++ b/camel/messages/base.py
@@ -16,7 +16,6 @@
 from camel.messages import (
     OpenAIAssistantMessage,
-    OpenAIChatMessage,
     OpenAIMessage,
     OpenAISystemMessage,
     OpenAIUserMessage,
@@ -42,9 +41,20 @@ class BaseMessage:
     role_name: str
     role_type: RoleType
     meta_dict: Optional[Dict[str, str]]
-    role: str
     content: str
 
+    @classmethod
+    def make_user_message(
+            cls, role_name: str, content: str,
+            meta_dict: Optional[Dict[str, str]] = None) -> 'BaseMessage':
+        return cls(role_name, RoleType.USER, meta_dict, content)
+
+    @classmethod
+    def make_assistant_message(
+            cls, role_name: str, content: str,
+            meta_dict: Optional[Dict[str, str]] = None) -> 'BaseMessage':
+        return cls(role_name, RoleType.ASSISTANT, meta_dict, content)
+
     def create_new_instance(self, content: str) -> "BaseMessage":
         r"""Create a new instance of the :obj:`BaseMessage` with updated
         content.
@@ -57,8 +67,7 @@ def create_new_instance(self, content: str) -> "BaseMessage":
         """
         return self.__class__(role_name=self.role_name,
                               role_type=self.role_type,
-                              meta_dict=self.meta_dict, role=self.role,
-                              content=content)
+                              meta_dict=self.meta_dict, content=content)
 
     def __add__(self, other: Any) -> Union["BaseMessage", Any]:
         r"""Addition operator override for :obj:`BaseMessage`.
@@ -116,10 +125,13 @@ def __contains__(self, item: str) -> bool:
         """
         return item in self.content
 
-    def token_len(self, model: ModelType = ModelType.GPT_3_5_TURBO) -> int:
+    def token_len(self, role_at_backend: str,
+                  model: ModelType = ModelType.GPT_3_5_TURBO) -> int:
         r"""Calculate the token length of the message for the specified model.
 
         Args:
+            role_at_backend (str): The backend role under which the message
+                is interpreted, so that special tokens are counted properly.
model (ModelType, optional): The model type to calculate the token length. (default: :obj:`ModelType.GPT_3_5_TURBO`) @@ -127,7 +139,8 @@ def token_len(self, model: ModelType = ModelType.GPT_3_5_TURBO) -> int: int: The token length of the message. """ from camel.utils import num_tokens_from_messages - return num_tokens_from_messages([self.to_openai_chat_message()], model) + return num_tokens_from_messages( + [self.to_openai_message(role_at_backend)], model) def extract_text_and_code_prompts( self) -> Tuple[List[TextPrompt], List[CodePrompt]]: @@ -167,40 +180,20 @@ def extract_text_and_code_prompts( return text_prompts, code_prompts - def to_openai_message(self, role: Optional[str] = None) -> OpenAIMessage: + def to_openai_message(self, role_at_backend: str) -> OpenAIMessage: r"""Converts the message to an :obj:`OpenAIMessage` object. Args: - role (Optional[str]): The role of the message in OpenAI chat + role_at_backend (str): The role of the message in OpenAI chat system, either :obj:`"system"`, :obj:`"user"`, or - obj:`"assistant"`. (default: :obj:`None`) + obj:`"assistant"`. Returns: OpenAIMessage: The converted :obj:`OpenAIMessage` object. """ - role = role or self.role - if role not in {"system", "user", "assistant"}: - raise ValueError(f"Unrecognized role: {role}") - return {"role": role, "content": self.content} - - def to_openai_chat_message( - self, - role: Optional[str] = None, - ) -> OpenAIChatMessage: - r"""Converts the message to an :obj:`OpenAIChatMessage` object. - - Args: - role (Optional[str]): The role of the message in OpenAI chat - system, either :obj:`"user"`, or :obj:`"assistant"`. - (default: :obj:`None`) - - Returns: - OpenAIChatMessage: The converted :obj:`OpenAIChatMessage` object. - """ - role = role or self.role - if role not in {"user", "assistant"}: - raise ValueError(f"Unrecognized role: {role}") - return {"role": role, "content": self.content} + if role_at_backend not in {"system", "user", "assistant"}: + raise ValueError(f"Unrecognized role: {role_at_backend}") + return {"role": role_at_backend, "content": self.content} def to_openai_system_message(self) -> OpenAISystemMessage: r"""Converts the message to an :obj:`OpenAISystemMessage` object. @@ -238,6 +231,5 @@ def to_dict(self) -> Dict: "role_name": self.role_name, "role_type": self.role_type.name, **(self.meta_dict or {}), - "role": self.role, "content": self.content, } diff --git a/camel/messages/chat_messages.py b/camel/messages/chat_messages.py deleted file mode 100644 index ded4819570..0000000000 --- a/camel/messages/chat_messages.py +++ /dev/null @@ -1,89 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== -from dataclasses import dataclass -from typing import Dict, Optional - -from camel.messages import BaseMessage -from camel.typing import RoleType - - -@dataclass -class ChatMessage(BaseMessage): - r"""Base class for chat messages used in CAMEL chat system. - - Args: - role_name (str): The name of the user or assistant role. - role_type (RoleType): The type of role, either - :obj:`RoleType.ASSISTANT` or :obj:`RoleType.USER`. - meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary - for the message. - role (str): The role of the message in OpenAI chat system. - content (str): The content of the message. (default: :obj:`""`) - """ - role_name: str - role_type: RoleType - meta_dict: Optional[Dict[str, str]] - role: str - content: str = "" - - def set_user_role_at_backend(self) -> "ChatMessage": - return self.__class__( - role_name=self.role_name, - role_type=self.role_type, - meta_dict=self.meta_dict, - role="user", - content=self.content, - ) - - -@dataclass -class AssistantChatMessage(ChatMessage): - r"""Class for chat messages from the assistant role used in CAMEL chat - system. - - Attributes: - role_name (str): The name of the assistant role. - role_type (RoleType): The type of role, always - :obj:`RoleType.ASSISTANT`. - meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary - for the message. - role (str): The role of the message in OpenAI chat system. - (default: :obj:`"assistant"`) - content (str): The content of the message. (default: :obj:`""`) - """ - role_name: str - role_type: RoleType = RoleType.ASSISTANT - meta_dict: Optional[Dict[str, str]] = None - role: str = "user" - content: str = "" - - -@dataclass -class UserChatMessage(ChatMessage): - r"""Class for chat messages from the user role used in CAMEL chat system. - - Args: - role_name (str): The name of the user role. - role_type (RoleType): The type of role, always :obj:`RoleType.USER`. - meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary - for the message. - role (str): The role of the message in OpenAI chat system. - (default: :obj:`"user"`) - content (str): The content of the message. (default: :obj:`""`) - """ - role_name: str - role_type: RoleType = RoleType.USER - meta_dict: Optional[Dict[str, str]] = None - role: str = "user" - content: str = "" diff --git a/camel/messages/system_messages.py b/camel/messages/system_messages.py deleted file mode 100644 index 5a4cc9185e..0000000000 --- a/camel/messages/system_messages.py +++ /dev/null @@ -1,81 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from dataclasses import dataclass -from typing import Dict, Optional - -from camel.messages import BaseMessage -from camel.typing import RoleType - - -@dataclass -class SystemMessage(BaseMessage): - r"""Class for system messages used in CAMEL chat system. 
-
-    Args:
-        role_name (str): The name of the user or assistant role.
-        role_type (RoleType): The type of role, either
-            :obj:`RoleType.ASSISTANT` or :obj:`RoleType.USER`.
-        meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
-            for the message.
-        role (str): The role of the message in OpenAI chat system.
-            (default: :obj:`"system"`)
-        content (str): The content of the message. (default: :obj:`""`)
-    """
-    role_name: str
-    role_type: RoleType
-    meta_dict: Optional[Dict[str, str]] = None
-    role: str = "system"
-    content: str = ""
-
-
-@dataclass
-class AssistantSystemMessage(SystemMessage):
-    r"""Class for system messages from the assistant used in the CAMEL chat
-    system.
-
-    Args:
-        role_name (str): The name of the assistant role.
-        role_type (RoleType): The type of role, always
-            :obj:`RoleType.ASSISTANT`.
-        meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
-            for the message.
-        role (str): The role of the message in OpenAI chat system.
-            (default: :obj:`"system"`)
-        content (str): The content of the message. (default: :obj:`""`)
-    """
-    role_name: str
-    role_type: RoleType = RoleType.ASSISTANT
-    meta_dict: Optional[Dict[str, str]] = None
-    role: str = "system"
-    content: str = ""
-
-
-@dataclass
-class UserSystemMessage(SystemMessage):
-    r"""Class for system messages from the user used in the CAMEL chat system.
-
-    Args:
-        role_name (str): The name of the user role.
-        role_type (RoleType): The type of role, always :obj:`RoleType.USER`.
-        meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
-            for the message.
-        role (str): The role of the message in OpenAI chat system.
-            (default: :obj:`"system"`)
-        content (str): The content of the message. (default: :obj:`""`)
-    """
-    role_name: str
-    role_type: RoleType = RoleType.USER
-    meta_dict: Optional[Dict[str, str]] = None
-    role: str = "system"
-    content: str = ""
diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py
index cc98ef06d3..76aa7fdd11 100644
--- a/camel/societies/role_playing.py
+++ b/camel/societies/role_playing.py
@@ -22,7 +22,7 @@
 from camel.agents.chat_agent import ChatAgentResponse
 from camel.generators import SystemMessageGenerator
 from camel.human import Human
-from camel.messages import AssistantChatMessage, ChatMessage, UserChatMessage
+from camel.messages import BaseMessage
 from camel.typing import ModelType, RoleType, TaskType
 
@@ -194,29 +194,30 @@ def __init__(
         else:
             self.critic = None
 
-    def init_chat(self) -> Tuple[AssistantChatMessage, List[ChatMessage]]:
+    def init_chat(self) -> Tuple[BaseMessage, List[BaseMessage]]:
         r"""Initializes the chat by resetting both of the assistant and user
         agents, and sending the system messages again to the agents using
         chat messages. Returns the assistant's introductory message and the
         user's response messages.
 
         Returns:
-            A tuple containing an `AssistantChatMessage` representing the
-            assistant's introductory message, and a list of `ChatMessage`s
+            A tuple containing a `BaseMessage` representing the
+            assistant's introductory message, and a list of `BaseMessage`s
             representing the user's response messages.
         """
         self.assistant_agent.reset()
         self.user_agent.reset()
 
         # Send the system messages again to the agents using chat messages
-        assistant_msg = AssistantChatMessage(
-            role_name=self.assistant_sys_msg.role_name, role="assistant",
+        assistant_msg = BaseMessage.make_assistant_message(
+            role_name=self.assistant_sys_msg.role_name,
             content=(f"{self.user_sys_msg.content}. "
                      "Now start to give me instructions one by one.
" "Only reply with Instruction and Input.")) - user_msg = UserChatMessage(role_name=self.user_sys_msg.role_name, - content=f"{self.assistant_sys_msg.content}") + user_msg = BaseMessage.make_user_message( + role_name=self.user_sys_msg.role_name, + content=f"{self.assistant_sys_msg.content}") assistant_response = self.assistant_agent.step(user_msg) if assistant_response.terminated or assistant_response.msgs is None: raise ValueError(f"Assistant agent terminated unexpectedly. " @@ -224,20 +225,20 @@ def init_chat(self) -> Tuple[AssistantChatMessage, List[ChatMessage]]: return assistant_msg, assistant_response.msgs - def process_messages( + def reduce_message_options( self, - messages: Sequence[ChatMessage], - ) -> ChatMessage: + messages: Sequence[BaseMessage], + ) -> BaseMessage: r"""Processes a sequence of chat messages, returning the processed message. If multiple messages are provided and `with_critic_in_the_loop` is `False`, raises a `ValueError`. If no messages are provided, a `ValueError` will be raised. Args: - messages: A sequence of `ChatMessage` objects to process. + messages: A sequence of `BaseMessage` objects to process. Returns: - A single `ChatMessage` representing the processed message. + A single `BaseMessage` representing the processed message. """ if len(messages) == 0: raise ValueError("No messages to process.") @@ -253,7 +254,7 @@ def process_messages( def step( self, - assistant_msg: ChatMessage, + assistant_msg: BaseMessage, ) -> Tuple[ChatAgentResponse, ChatAgentResponse]: r"""Advances the conversation by taking a message from the assistant, processing it using the user agent, and then processing the resulting @@ -264,7 +265,7 @@ def step( terminated the conversation, and any additional user information. Args: - assistant_msg: A `ChatMessage` representing the message from the + assistant_msg: A `BaseMessage` representing the message from the assistant. Returns: @@ -280,16 +281,16 @@ def step( return (ChatAgentResponse([], False, {}), ChatAgentResponse([], user_response.terminated, user_response.info)) - user_msg = self.process_messages(user_response.msgs) - self.user_agent.update_messages(user_msg) + user_msg = self.reduce_message_options(user_response.msgs) + self.user_agent.submit_message(user_msg) assistant_response = self.assistant_agent.step(user_msg) if assistant_response.terminated or assistant_response.msgs is None: return (ChatAgentResponse([], assistant_response.terminated, assistant_response.info), ChatAgentResponse([user_msg], False, user_response.info)) - assistant_msg = self.process_messages(assistant_response.msgs) - self.assistant_agent.update_messages(assistant_msg) + assistant_msg = self.reduce_message_options(assistant_response.msgs) + self.assistant_agent.submit_message(assistant_msg) return ( ChatAgentResponse([assistant_msg], assistant_response.terminated, diff --git a/examples/ai_society/generate_meta_data.py b/examples/ai_society/generate_meta_data.py index ae603c654c..cf67fe6c42 100644 --- a/examples/ai_society/generate_meta_data.py +++ b/examples/ai_society/generate_meta_data.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator from camel.typing import TaskType @@ -22,14 +22,14 @@ def main(key: str = "generate_users", num_roles: int = 50): TaskType.AI_SOCIETY, key) prompt = prompt_template.format(num_roles=num_roles) print(prompt) - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Assistant", content="You are a helpful assistant.", ) agent = ChatAgent(assistant_sys_msg) agent.reset() - user_msg = UserChatMessage( + user_msg = BaseMessage.make_user_message( role_name="User", content=prompt, ) diff --git a/examples/ai_society/task_generation.py b/examples/ai_society/task_generation.py index 63d7189509..a12d096d95 100644 --- a/examples/ai_society/task_generation.py +++ b/examples/ai_society/task_generation.py @@ -19,7 +19,7 @@ AISocietyTaskPromptGenerator, SystemMessageGenerator, ) -from camel.messages import UserChatMessage +from camel.messages import BaseMessage from camel.typing import RoleType, TaskType @@ -32,8 +32,8 @@ def generate_tasks(role_names: str, task_generator_prompt: str, dict(), role_tuple=("Task Generator", RoleType.DEFAULT)) assistant_agent = ChatAgent(assistant_sys_msg, model=model) - user_msg = UserChatMessage(role_name="Task Generator", - content=task_generator_prompt) + user_msg = BaseMessage.make_user_message(role_name="Task Generator", + content=task_generator_prompt) assistant_response = assistant_agent.step(user_msg) diff --git a/examples/code/generate_meta_data.py b/examples/code/generate_meta_data.py index 728b9f0e9c..e40b13184f 100644 --- a/examples/code/generate_meta_data.py +++ b/examples/code/generate_meta_data.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator from camel.typing import TaskType @@ -22,14 +22,14 @@ def generate_meta_data(meta_data: str, num: int = 50, model=None): TaskType.CODE, f"generate_{meta_data}") prompt = prompt_template.format(**{f"num_{meta_data}": num}) print(prompt) - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Assistant", content="You are a helpful assistant.", ) agent = ChatAgent(assistant_sys_msg, model=model) agent.reset() - user_msg = UserChatMessage( + user_msg = BaseMessage.make_user_message( role_name="User", content=prompt, ) diff --git a/examples/code/role_playing_multiprocess.py b/examples/code/role_playing_multiprocess.py index 14571ecf8a..fc59334f18 100644 --- a/examples/code/role_playing_multiprocess.py +++ b/examples/code/role_playing_multiprocess.py @@ -18,12 +18,7 @@ from camel.agents import ChatAgent, TaskSpecifyAgent from camel.configs import ChatGPTConfig from camel.generators import SystemMessageGenerator -from camel.messages import ( - AssistantChatMessage, - AssistantSystemMessage, - UserChatMessage, - UserSystemMessage, -) +from camel.messages import BaseMessage from camel.typing import RoleType, TaskType from camel.utils import download_tasks @@ -31,21 +26,21 @@ def init_chat( assistant_agent: ChatAgent, user_agent: ChatAgent, - user_sys_msg: UserSystemMessage, - assistant_sys_msg: AssistantSystemMessage, + user_sys_msg: BaseMessage, + assistant_sys_msg: BaseMessage, ): assistant_agent.reset() user_agent.reset() # Send the system messages again to the agents using chat messages - assistant_msg = AssistantChatMessage( - role_name=assistant_agent.role_name, role="assistant", + assistant_msg = BaseMessage.make_assistant_message( + role_name=assistant_agent.role_name, content=(f"{user_sys_msg.content}. " "Now start to give me instructions one by one. 
" "Only reply with Instruction and Input.")) - user_msg = UserChatMessage(role_name=user_agent.role_name, - content=f"{assistant_sys_msg.content}") + user_msg = BaseMessage.make_user_message( + role_name=user_agent.role_name, content=f"{assistant_sys_msg.content}") assistant_response = assistant_agent.step(user_msg) return assistant_msg, assistant_response.msgs @@ -134,7 +129,7 @@ def generate_data(language_idx: int, language_name: str, domain_idx: int, f"{user_response.info['termination_reasons'][0]}") break - user_agent.update_messages(user_response.msg) + user_agent.submit_message(user_response.msg) print(f"User:\n{user_response.msg.content}\n") assistant_response = assistant_agent.step(user_response.msg) @@ -146,7 +141,7 @@ def generate_data(language_idx: int, language_name: str, domain_idx: int, f"{assistant_response.info['termination_reasons'][0]}") break - assistant_agent.update_messages(assistant_response.msg) + assistant_agent.submit_message(assistant_response.msg) print(f"Assistant:\n{assistant_response.msg.content}\n") # Condition 3: Break if user does not give instruction diff --git a/examples/code/task_generation.py b/examples/code/task_generation.py index 43aec07a93..140b68b39c 100644 --- a/examples/code/task_generation.py +++ b/examples/code/task_generation.py @@ -16,7 +16,7 @@ from camel.agents import ChatAgent from camel.generators import CodeTaskPromptGenerator, SystemMessageGenerator -from camel.messages import UserChatMessage +from camel.messages import BaseMessage from camel.typing import RoleType, TaskType @@ -28,8 +28,8 @@ def generate_tasks(task_generator_prompt: str, language: str, domain: str, dict(), role_tuple=("Task Generator", RoleType.DEFAULT)) assistant_agent = ChatAgent(assistant_sys_msg, model=model) - user_msg = UserChatMessage(role_name="Task Generator", - content=task_generator_prompt) + user_msg = BaseMessage.make_user_message(role_name="Task Generator", + content=task_generator_prompt) assistant_response = assistant_agent.step(user_msg) diff --git a/examples/embodiment/hugging_face_tool.py b/examples/embodiment/hugging_face_tool.py index d67d9a0506..fdf66be7fc 100644 --- a/examples/embodiment/hugging_face_tool.py +++ b/examples/embodiment/hugging_face_tool.py @@ -16,7 +16,7 @@ from camel.agents import EmbodiedAgent, HuggingFaceToolAgent from camel.agents.tool_agents.base import BaseToolAgent from camel.generators import SystemMessageGenerator -from camel.messages import UserChatMessage +from camel.messages import BaseMessage from camel.typing import ModelType, RoleType @@ -40,7 +40,7 @@ def main(): verbose=True, action_space=action_space, ) - user_msg = UserChatMessage( + user_msg = BaseMessage.make_user_message( role_name=role_name, content=("Draw all the Camelidae species, " "caption the image content, " diff --git a/examples/evaluation/single_agent.py b/examples/evaluation/single_agent.py index a92415a77f..caa8e74d3f 100644 --- a/examples/evaluation/single_agent.py +++ b/examples/evaluation/single_agent.py @@ -17,7 +17,7 @@ from typing import Any, Dict, List from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator from camel.typing import TaskType @@ -51,14 +51,14 @@ def generate_questions(examples: str, category: str, save_file_name: str, prompt = prompt_template.format(**evaluation_dict) print(prompt) - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( 
role_name="Assistant", content="You are a helpful assistant.", ) agent = ChatAgent(assistant_sys_msg, model=model) agent.reset() - user_msg = UserChatMessage(role_name="User", content=prompt) + user_msg = BaseMessage.make_user_message(role_name="User", content=prompt) assistant_response = agent.step(user_msg) if len(assistant_response.msgs) > 0: diff --git a/examples/misalignment/single_agent.py b/examples/misalignment/single_agent.py index 6a5c90a6af..ab4d6c6565 100644 --- a/examples/misalignment/single_agent.py +++ b/examples/misalignment/single_agent.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator from camel.typing import TaskType @@ -22,14 +22,14 @@ def main(model=None) -> None: "cooperatively to achieve together. Be concise. Be creative.") sys_prompt = PromptTemplateGenerator().get_prompt_from_key( TaskType.MISALIGNMENT, "dan_prompt") - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Assistant", content=sys_prompt, ) agent = ChatAgent(assistant_sys_msg, model=model) agent.reset() - user_msg = UserChatMessage(role_name="User", content=prompt) + user_msg = BaseMessage.make_user_message(role_name="User", content=prompt) assistant_response = agent.step(user_msg) print(assistant_response.msg.content) diff --git a/examples/misalignment/task_generation.py b/examples/misalignment/task_generation.py index a6c1b26be5..85fd290c47 100644 --- a/examples/misalignment/task_generation.py +++ b/examples/misalignment/task_generation.py @@ -21,7 +21,7 @@ RoleNameGenerator, SystemMessageGenerator, ) -from camel.messages import UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator from camel.typing import ModelType, RoleType, TaskType @@ -35,8 +35,8 @@ def generate_tasks(role_names: str, task_generator_prompt: str, role_prompt=role_prompt) assistant_agent = ChatAgent(assistant_sys_msg, ModelType.GPT_3_5_TURBO) - user_msg = UserChatMessage(role_name="Task Generator", - content=task_generator_prompt) + user_msg = BaseMessage.make_user_message(role_name="Task Generator", + content=task_generator_prompt) assistant_response = assistant_agent.step(user_msg) diff --git a/examples/single_agent.py b/examples/single_agent.py index 8c8048759f..3a89261672 100644 --- a/examples/single_agent.py +++ b/examples/single_agent.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import PromptTemplateGenerator from camel.typing import TaskType @@ -22,14 +22,14 @@ def main(key: str = 'generate_users', num_roles: int = 50, model=None): TaskType.AI_SOCIETY, key) prompt = prompt_template.format(num_roles=num_roles) print(prompt) - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Assistant", content="You are a helpful assistant.", ) agent = ChatAgent(assistant_sys_msg, model=model) agent.reset() - user_msg = UserChatMessage(role_name="User", content=prompt) + user_msg = BaseMessage.make_user_message(role_name="User", content=prompt) assistant_response = agent.step(user_msg) print(assistant_response.msg.content) diff --git a/examples/summarization/gpt_solution_extraction.py b/examples/summarization/gpt_solution_extraction.py index 9b66a0cde3..17d8421973 100644 --- a/examples/summarization/gpt_solution_extraction.py +++ b/examples/summarization/gpt_solution_extraction.py @@ -22,7 +22,7 @@ import numpy as np from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage from camel.prompts import SolutionExtractionPromptTemplateDict from camel.typing import ModelType, RoleType @@ -114,7 +114,7 @@ def solution_extraction(conversation: Dict, flattened_conversation: str, solution_extraction_template = SolutionExtractionPromptTemplateDict() assistant_sys_msg_prompt = solution_extraction_template[RoleType.ASSISTANT] - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Solution Extractor", content=assistant_sys_msg_prompt) # We use GPT4 because it has a longer context length @@ -123,7 +123,7 @@ def solution_extraction(conversation: Dict, flattened_conversation: str, prompt = "Here is the conversation:" + flattened_conversation - user_msg = UserChatMessage(role_name="User", content=prompt) + user_msg = BaseMessage.make_user_message(role_name="User", content=prompt) assistant_response = agent.step(user_msg) print(assistant_response.msg.content) diff --git a/examples/summarization/gpt_solver.py b/examples/summarization/gpt_solver.py index ede675bf6c..648856dc93 100644 --- a/examples/summarization/gpt_solver.py +++ b/examples/summarization/gpt_solver.py @@ -17,7 +17,7 @@ from typing import Dict from camel.agents import ChatAgent -from camel.messages import AssistantSystemMessage, UserChatMessage +from camel.messages import BaseMessage # Directory containing your json files of CAMEL conversations # This code will append a new key called "gpt_solution" to each json file @@ -30,7 +30,7 @@ def process_file(data: Dict[str, str]) -> None: print(data["id"]) - assistant_sys_msg = AssistantSystemMessage( + assistant_sys_msg = BaseMessage.make_assistant_message( role_name="Assistant", content="You are a helpful assistant.", ) @@ -38,7 +38,7 @@ def process_file(data: Dict[str, str]) -> None: agent.reset() prompt = "Solve the following task:\n" + data["specified_task"] - user_msg = UserChatMessage(role_name="User", content=prompt) + user_msg = BaseMessage.make_user_message(role_name="User", content=prompt) assistant_response = agent.step(user_msg) print(assistant_response.msg.content) diff --git a/examples/translation/translator.py b/examples/translation/translator.py index 5211ef0c1b..fe1b83aacc 100644 --- 
a/examples/translation/translator.py +++ b/examples/translation/translator.py @@ -18,7 +18,7 @@ from camel.agents import ChatAgent from camel.generators import SystemMessageGenerator -from camel.messages import UserChatMessage +from camel.messages import BaseMessage from camel.typing import ModelType, RoleType, TaskType @@ -52,8 +52,8 @@ def translate_content(directory_path: str, file_path: str, assistant_agent = ChatAgent(assistant_sys_msg, ModelType.GPT_3_5_TURBO) - user_msg = UserChatMessage(role_name="Language Translator", - content=msg_i_content) + user_msg = BaseMessage.make_user_message( + role_name="Language Translator", content=msg_i_content) assistant_response = assistant_agent.step(user_msg) assistant_msg = assistant_response.msg diff --git a/test/agents/test_chat_agent.py b/test/agents/test_chat_agent.py index 53ad434651..90bd7e5c5e 100644 --- a/test/agents/test_chat_agent.py +++ b/test/agents/test_chat_agent.py @@ -16,7 +16,7 @@ from camel.agents import ChatAgent from camel.configs import ChatGPTConfig from camel.generators import SystemMessageGenerator -from camel.messages import ChatMessage, SystemMessage +from camel.messages import BaseMessage from camel.typing import ModelType, RoleType, TaskType parametrize = pytest.mark.parametrize('model', [ @@ -41,8 +41,8 @@ def test_chat_agent(model: ModelType): f"RoleType.ASSISTANT, {str(model)})") assistant.reset() - user_msg = ChatMessage(role_name="Patient", role_type=RoleType.USER, - meta_dict=dict(), role="user", content="Hello!") + user_msg = BaseMessage(role_name="Patient", role_type=RoleType.USER, + meta_dict=dict(), content="Hello!") assistant_response = assistant.step(user_msg) assert isinstance(assistant_response.msgs, list) @@ -54,8 +54,8 @@ def test_chat_agent(model: ModelType): assistant.reset() token_limit = assistant.model_token_limit - user_msg = ChatMessage(role_name="Patient", role_type=RoleType.USER, - meta_dict=dict(), role="user", + user_msg = BaseMessage(role_name="Patient", role_type=RoleType.USER, + meta_dict=dict(), content="token" * (token_limit + 1)) assistant_response = assistant.step(user_msg) @@ -72,13 +72,12 @@ def test_chat_agent(model: ModelType): @pytest.mark.parametrize('n', [1, 2, 3]) def test_chat_agent_multiple_return_messages(n): model_config = ChatGPTConfig(temperature=1.4, n=n) - system_msg = SystemMessage("Assistant", RoleType.ASSISTANT, - content="You are a helpful assistant.") + system_msg = BaseMessage("Assistant", RoleType.ASSISTANT, meta_dict=None, + content="You are a helpful assistant.") assistant = ChatAgent(system_msg, model_config=model_config) assistant.reset() - user_msg = ChatMessage(role_name="User", role_type=RoleType.USER, - meta_dict=dict(), role="user", - content="Tell me a joke.") + user_msg = BaseMessage(role_name="User", role_type=RoleType.USER, + meta_dict=dict(), content="Tell me a joke.") assistant_response = assistant.step(user_msg) assert assistant_response.msgs is not None assert len(assistant_response.msgs) == n @@ -86,9 +85,9 @@ def test_chat_agent_multiple_return_messages(n): @pytest.mark.model_backend def test_set_output_language(): - system_message = SystemMessage(role_name="assistant", - role_type=RoleType.ASSISTANT, - content="You are a help assistant.") + system_message = BaseMessage(role_name="assistant", + role_type=RoleType.ASSISTANT, meta_dict=None, + content="You are a help assistant.") agent = ChatAgent(system_message=system_message, model=ModelType.GPT_3_5_TURBO) assert agent.output_language is None @@ -101,8 +100,8 @@ def test_set_output_language(): 
assert agent.output_language == output_language # Verify that the system message is updated with the new output language - updated_system_message = SystemMessage( - role_name="assistant", role_type="assistant", + updated_system_message = BaseMessage( + role_name="assistant", role_type="assistant", meta_dict=None, content="You are a help assistant." "\nRegardless of the input language, you must output text in Arabic.") assert agent.system_message.content == updated_system_message.content diff --git a/test/agents/test_critic_agent.py b/test/agents/test_critic_agent.py index b42e254458..fb1ff3e319 100644 --- a/test/agents/test_critic_agent.py +++ b/test/agents/test_critic_agent.py @@ -14,16 +14,17 @@ import pytest from camel.agents import CriticAgent -from camel.messages import ChatMessage, SystemMessage +from camel.messages import BaseMessage from camel.typing import RoleType @pytest.fixture def critic_agent() -> CriticAgent: return CriticAgent( - SystemMessage( + BaseMessage( "critic", RoleType.CRITIC, + None, content=("You are a critic who assists in selecting an option " "and provides explanations. " "Your favorite fruit is Apple. " @@ -33,18 +34,16 @@ def critic_agent() -> CriticAgent: def test_flatten_options(critic_agent: CriticAgent): messages = [ - ChatMessage( + BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="Apple", ), - ChatMessage( + BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="Banana", ), ] @@ -60,27 +59,24 @@ def test_flatten_options(critic_agent: CriticAgent): @pytest.mark.model_backend def test_get_option(critic_agent: CriticAgent): messages = [ - ChatMessage( + BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="Apple", ), - ChatMessage( + BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="Banana", ), ] flatten_options = critic_agent.flatten_options(messages) - input_message = ChatMessage( + input_message = BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content=flatten_options, ) assert critic_agent.options_dict == {"1": "Apple", "2": "Banana"} @@ -89,11 +85,10 @@ def test_get_option(critic_agent: CriticAgent): def test_parse_critic(critic_agent: CriticAgent): - critic_msg = ChatMessage( + critic_msg = BaseMessage( role_name="critic", role_type=RoleType.CRITIC, meta_dict=dict(), - role="assistant", content="I choose option 1", ) expected_output = "1" @@ -103,18 +98,16 @@ def test_parse_critic(critic_agent: CriticAgent): @pytest.mark.model_backend def test_step(critic_agent: CriticAgent): messages = [ - ChatMessage( + BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="Apple", ), - ChatMessage( + BaseMessage( role_name="user", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="Banana", ), ] diff --git a/test/agents/test_embodied_agent.py b/test/agents/test_embodied_agent.py index ecaf584d89..a4cfdc3255 100644 --- a/test/agents/test_embodied_agent.py +++ b/test/agents/test_embodied_agent.py @@ -15,7 +15,7 @@ from camel.agents import EmbodiedAgent, HuggingFaceToolAgent from camel.generators import SystemMessageGenerator -from camel.messages import ChatMessage, UserChatMessage +from camel.messages import BaseMessage from camel.typing import RoleType @@ -44,11 +44,11 @@ def test_step(): role_tuple=(f"{role_name}'s Embodiment", RoleType.EMBODIMENT)) embodied_agent = 
EmbodiedAgent(sys_msg, verbose=True) print(embodied_agent.system_message) - user_msg = UserChatMessage( + user_msg = BaseMessage.make_user_message( role_name=role_name, content="Draw all the Camelidae species.", ) output_message, terminated, info = embodied_agent.step(user_msg) - assert isinstance(output_message, ChatMessage) + assert isinstance(output_message, BaseMessage) assert not terminated assert isinstance(info, dict) diff --git a/test/agents/test_role_playing.py b/test/agents/test_role_playing.py index f66767716f..cecfa3e5c2 100644 --- a/test/agents/test_role_playing.py +++ b/test/agents/test_role_playing.py @@ -11,14 +11,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -import typing - import pytest from camel.agents import ChatAgent -from camel.messages import AssistantChatMessage, ChatMessage, SystemMessageType +from camel.messages import BaseMessage from camel.societies import RolePlaying -from camel.typing import ModelType, TaskType +from camel.typing import ModelType, RoleType, TaskType def test_role_playing_init(): @@ -41,10 +39,10 @@ def test_role_playing_init(): assert role_playing.specified_task_prompt is None assert role_playing.planned_task_prompt is None - assert (type(role_playing.assistant_sys_msg) - in typing.get_args(SystemMessageType)) - assert (type(role_playing.user_sys_msg) - in typing.get_args(SystemMessageType)) + assert isinstance(role_playing.assistant_sys_msg, BaseMessage) + assert role_playing.assistant_sys_msg.role_type == RoleType.ASSISTANT + assert isinstance(role_playing.user_sys_msg, BaseMessage) + assert role_playing.user_sys_msg.role_type == RoleType.USER assert isinstance(role_playing.assistant_agent, ChatAgent) assert isinstance(role_playing.user_agent, ChatAgent) @@ -69,9 +67,8 @@ def test_role_playing_step(task_type, extend_sys_msg_meta_dicts, extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts, extend_task_specify_meta_dict=extend_task_specify_meta_dict, ) - init_assistant_msg = AssistantChatMessage(role_name="AI Assistant", - role="assistant", - content="Hello") + init_assistant_msg = BaseMessage.make_assistant_message( + role_name="AI Assistant", content="Hello") print(role_playing.assistant_agent.system_message) print(role_playing.user_agent.system_message) @@ -80,7 +77,7 @@ def test_role_playing_step(task_type, extend_sys_msg_meta_dicts, for response in (assistant_response, user_response): assert isinstance(response.msgs, list) assert len(response.msgs) == 1 - assert isinstance(response.msgs[0], ChatMessage) + assert isinstance(response.msgs[0], BaseMessage) assert isinstance(response.terminated, bool) assert response.terminated is False assert isinstance(response.info, dict) diff --git a/test/messages/test_chat_message.py b/test/messages/test_chat_message.py index 738fc9f72f..a6f0416588 100644 --- a/test/messages/test_chat_message.py +++ b/test/messages/test_chat_message.py @@ -13,35 +13,33 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== import pytest -from camel.messages import AssistantChatMessage, ChatMessage, UserChatMessage +from camel.messages import BaseMessage from camel.typing import RoleType @pytest.fixture -def chat_message() -> ChatMessage: - return ChatMessage( +def chat_message() -> BaseMessage: + return BaseMessage( role_name="test_role", role_type=RoleType.ASSISTANT, meta_dict=None, - role="assistant", content="test chat message", ) @pytest.fixture -def assistant_chat_message() -> AssistantChatMessage: - return AssistantChatMessage( +def assistant_chat_message() -> BaseMessage: + return BaseMessage( role_name="test_assistant", role_type=RoleType.ASSISTANT, meta_dict=None, - role="assistant", content="test assistant chat message", ) @pytest.fixture -def user_chat_message() -> UserChatMessage: - return UserChatMessage( +def user_chat_message() -> BaseMessage: + return BaseMessage( role_name="test_user", role_type=RoleType.USER, meta_dict=None, @@ -49,17 +47,15 @@ def user_chat_message() -> UserChatMessage: ) -def test_chat_message(chat_message: ChatMessage) -> None: +def test_chat_message(chat_message: BaseMessage) -> None: role_name = "test_role" role_type = RoleType.ASSISTANT meta_dict = None - role = "assistant" content = "test chat message" assert chat_message.role_name == role_name assert chat_message.role_type == role_type assert chat_message.meta_dict == meta_dict - assert chat_message.role == role assert chat_message.content == content dictionary = chat_message.to_dict() @@ -67,13 +63,11 @@ def test_chat_message(chat_message: ChatMessage) -> None: "role_name": role_name, "role_type": role_type.name, **(meta_dict or {}), - "role": role, "content": content, } -def test_assistant_chat_message( - assistant_chat_message: AssistantChatMessage) -> None: +def test_assistant_chat_message(assistant_chat_message: BaseMessage) -> None: role_name = "test_assistant" role_type = RoleType.ASSISTANT meta_dict = None @@ -82,7 +76,6 @@ def test_assistant_chat_message( assert assistant_chat_message.role_name == role_name assert assistant_chat_message.role_type == role_type assert assistant_chat_message.meta_dict == meta_dict - assert assistant_chat_message.role == "assistant" assert assistant_chat_message.content == content dictionary = assistant_chat_message.to_dict() @@ -90,12 +83,11 @@ def test_assistant_chat_message( "role_name": role_name, "role_type": role_type.name, **(meta_dict or {}), - "role": "assistant", "content": content, } -def test_user_chat_message(user_chat_message: UserChatMessage) -> None: +def test_user_chat_message(user_chat_message: BaseMessage) -> None: role_name = "test_user" role_type = RoleType.USER meta_dict = None @@ -104,7 +96,6 @@ def test_user_chat_message(user_chat_message: UserChatMessage) -> None: assert user_chat_message.role_name == role_name assert user_chat_message.role_type == role_type assert user_chat_message.meta_dict == meta_dict - assert user_chat_message.role == "user" assert user_chat_message.content == content dictionary = user_chat_message.to_dict() @@ -112,6 +103,5 @@ def test_user_chat_message(user_chat_message: UserChatMessage) -> None: "role_name": role_name, "role_type": role_type.name, **(meta_dict or {}), - "role": "user", "content": content, } diff --git a/test/messages/test_message_base.py b/test/messages/test_message_base.py index fe9957595d..5711d46d43 100644 --- a/test/messages/test_message_base.py +++ b/test/messages/test_message_base.py @@ -24,7 +24,6 @@ def base_message() -> BaseMessage: role_name="test_user", role_type=RoleType.USER, 
meta_dict={"key": "value"}, - role="user", content="test content", ) @@ -49,7 +48,7 @@ def test_base_message_contains_operator(base_message: BaseMessage): def test_base_message_token_len(base_message: BaseMessage): - token_len = base_message.token_len() + token_len = base_message.token_len("user") assert isinstance(token_len, int) assert token_len == 10 @@ -57,7 +56,7 @@ def test_base_message_token_len(base_message: BaseMessage): def test_extract_text_and_code_prompts(): base_message = BaseMessage( role_name="test_role_name", role_type=RoleType.USER, meta_dict=dict(), - role="user", content="This is a text prompt.\n\n" + content="This is a text prompt.\n\n" "```python\nprint('This is a code prompt')\n```\n" "This is another text prompt.\n\n" "```c\nprintf(\"This is another code prompt\");\n```") @@ -83,7 +82,6 @@ def test_base_message_to_dict(base_message: BaseMessage) -> None: "role_name": "test_user", "role_type": "USER", "key": "value", - "role": "user", "content": "test content", } assert base_message.to_dict() == expected_dict @@ -97,20 +95,16 @@ def test_base_message(): content = "test_content" message = BaseMessage(role_name=role_name, role_type=role_type, - meta_dict=meta_dict, role=role, content=content) + meta_dict=meta_dict, content=content) assert message.role_name == role_name assert message.role_type == role_type assert message.meta_dict == meta_dict - assert message.role == role assert message.content == content - openai_message = message.to_openai_message() + openai_message = message.to_openai_message(role) assert openai_message == {"role": role, "content": content} - openai_chat_message = message.to_openai_chat_message() - assert openai_chat_message == {"role": role, "content": content} - openai_system_message = message.to_openai_system_message() assert openai_system_message == {"role": "system", "content": content} @@ -127,6 +121,5 @@ def test_base_message(): assert dictionary == { "role_name": role_name, "role_type": role_type.name, - **(meta_dict or {}), "role": role, - "content": content + **(meta_dict or {}), "content": content } diff --git a/test/messages/test_system_message.py b/test/messages/test_system_message.py deleted file mode 100644 index 00d0974d8c..0000000000 --- a/test/messages/test_system_message.py +++ /dev/null @@ -1,61 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== -import pytest - -from camel.messages import SystemMessage -from camel.typing import RoleType - - -@pytest.fixture -def system_message() -> SystemMessage: - return SystemMessage( - role_name="test_assistant", - role_type=RoleType.ASSISTANT, - meta_dict=None, - content="test system message", - ) - - -def test_system_message(): - role_name = "test_role_name" - role_type = RoleType.USER - meta_dict = {"key": "value"} - content = "test_content" - - message = SystemMessage(role_name=role_name, role_type=role_type, - meta_dict=meta_dict, content=content) - - assert message.role_name == role_name - assert message.role_type == role_type - assert message.meta_dict == meta_dict - assert message.role == "system" - assert message.content == content - - dictionary = message.to_dict() - assert dictionary == { - "role_name": role_name, - "role_type": role_type.name, - **(meta_dict or {}), "role": "system", - "content": content - } - - -def test_system_message_to_dict(system_message: SystemMessage) -> None: - expected_dict = { - "role_name": "test_assistant", - "role_type": "ASSISTANT", - "role": "system", - "content": "test system message", - } - assert system_message.to_dict() == expected_dict diff --git a/test/test_human.py b/test/test_human.py index e99fc02be4..b1a11f420e 100644 --- a/test/test_human.py +++ b/test/test_human.py @@ -12,14 +12,16 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from camel.human import Human -from camel.messages import AssistantChatMessage +from camel.messages import BaseMessage def test_display_options(): human = Human() msgs = [ - AssistantChatMessage(role_name="assistant", content="Hello"), - AssistantChatMessage(role_name="assistant", content="World"), + BaseMessage.make_assistant_message(role_name="assistant", + content="Hello"), + BaseMessage.make_assistant_message(role_name="assistant", + content="World"), ] human.display_options(msgs) @@ -27,8 +29,10 @@ def test_display_options(): def test_get_input(monkeypatch): human = Human() msgs = [ - AssistantChatMessage(role_name="assistant", content="Hello"), - AssistantChatMessage(role_name="assistant", content="World"), + BaseMessage.make_assistant_message(role_name="assistant", + content="Hello"), + BaseMessage.make_assistant_message(role_name="assistant", + content="World"), ] human.display_options(msgs) monkeypatch.setattr('builtins.input', lambda _: str(1)) @@ -38,8 +42,10 @@ def test_get_input(monkeypatch): def test_step(monkeypatch): human = Human() msgs = [ - AssistantChatMessage(role_name="assistant", content="Hello"), - AssistantChatMessage(role_name="assistant", content="World"), + BaseMessage.make_assistant_message(role_name="assistant", + content="Hello"), + BaseMessage.make_assistant_message(role_name="assistant", + content="World"), ] monkeypatch.setattr('builtins.input', lambda _: str(1))