Skip to content

Commit

Permalink
Added YAML support to agents. All the prompts are now in YAML. Introduced…
Browse files Browse the repository at this point in the history
… a just-agents object in the options dictionary. It will hold all of our additional parameters.
  • Loading branch information
Alex-Karmazin committed Oct 6, 2024
1 parent 434f877 commit 0206663
Show file tree
Hide file tree
Showing 2 changed files with 70 additions and 38 deletions.
27 changes: 27 additions & 0 deletions just_agents/config/cot_agent_prompt.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Default prompt schema for ChainOfThoughtAgent.
# system_prompt: instruction that forces step-by-step reasoning, one JSON step object per reply.
system_prompt: "You are an expert AI assistant that explains your reasoning step by step.
For each step, provide a title that describes what you're doing in that step, along with the content.
Decide if you need another step or if you're ready to give the final answer.
Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys.
Make sure you send only one JSON step object.
USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3.
BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO.
IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS.
CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE.
FULLY TEST ALL OTHER POSSIBILITIES.
YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO.
DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.
Example of a valid JSON response:
```json
{
\"title\": \"Identifying Key Information\",
\"content\": \"To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...\",
\"next_action\": \"continue\"
}```
"
# final_prompt: sent once the reasoning loop ends, to elicit the final answer.
final_prompt: "Please provide the final answer based solely on your reasoning above."
# Key names the agent reads from each JSON step object.
title: "title"
content: "content"
next_action: "next_action"
# next_action values that drive the reasoning loop.
action_continue: "continue"
action_final: "final_answer"
81 changes: 43 additions & 38 deletions just_agents/cot_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,74 +2,79 @@
import json
from just_agents.streaming.protocols.openai_streaming import OpenaiStreamingProtocol
from just_agents.streaming.protocols.abstract_protocol import AbstractStreamingProtocol

FINAL_PROMPT = "Please provide the final answer based solely on your reasoning above."
DEFAULT_SYSTEM_PROMPT = """You are an expert AI assistant that explains your reasoning step by step.
For each step, provide a title that describes what you're doing in that step, along with the content.
Decide if you need another step or if you're ready to give the final answer.
Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys.
Make sure you send only one JSON step object.
USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3.
BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO.
IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS.
CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE.
FULLY TEST ALL OTHER POSSIBILITIES.
YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO.
DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.
Example of a valid JSON response:
```json
{
"title": "Identifying Key Information",
"content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
"next_action": "continue"
}```
"""
from pathlib import Path, PurePath
import yaml

class ChainOfThoughtAgent():
    """Agent that elicits step-by-step chain-of-thought reasoning from an LLM.

    The prompts and JSON key names it relies on are loaded from a prompt
    schema (a YAML file or a dict), defaulting to config/cot_agent_prompt.yaml.
    """

    def __init__(self, llm_options, prompt_schema: any = None, tools: list = None, output_streaming: AbstractStreamingProtocol = None):
        """Create the agent and instruct the underlying session.

        :param llm_options: options dict passed to LLMSession; extra agent
            parameters may live under its "just-agents" key (see get_param).
        :param prompt_schema: None (use packaged default YAML), a str/Path to
            a YAML file, or an already-parsed dict.
        :param tools: optional list of tools forwarded to LLMSession.
        :param output_streaming: streaming protocol; defaults to
            OpenaiStreamingProtocol. (Default created here, not in the
            signature, to avoid a single shared instance across agents.)
        :raises ValueError: if the schema path does not exist or the schema
            is not a dict after loading.
        """
        if output_streaming is None:
            output_streaming = OpenaiStreamingProtocol()
        self.session: LLMSession = LLMSession(llm_options=llm_options, tools=tools)
        if prompt_schema is None:
            # Fall back to the prompt definition shipped with the package.
            prompt_schema = Path(__file__).parent / "config" / "cot_agent_prompt.yaml"
        if isinstance(prompt_schema, str):
            prompt_schema = Path(prompt_schema)
        if isinstance(prompt_schema, Path):
            if not prompt_schema.exists():
                raise ValueError(
                    f"In ChainOfThoughtAgent constructor prompt_schema path does not exist: ({prompt_schema})!")
            with open(prompt_schema) as f:
                # safe_load is sufficient for a plain mapping and avoids the
                # arbitrary object construction that full_load permits.
                prompt_schema = yaml.safe_load(f)
        if not isinstance(prompt_schema, dict):
            raise ValueError("In ChainOfThoughtAgent constructor prompt_schema parameter should be None, string, Path or dict!")

        self.prompt_schema: dict = prompt_schema
        self.session.instruct(self.prompt_schema["system_prompt"])
        self.output_streaming: AbstractStreamingProtocol = output_streaming


def stream(self, prompt, max_steps: int = 25, thought_max_tokes:int = 300, final_max_tokens:int = 1200, final_prompt:str = FINAL_PROMPT):
def get_param(self, key: str, default: any):
just_agents = self.session.llm_options.get("just-agents", None)
if just_agents is None:
return default
return just_agents.get(key, default)


def stream(self, prompt):
thought_max_tokes = self.get_param("thought_max_tokes", 300)
self.session.update_options("max_tokens", thought_max_tokes)
self.session.update_options("response_format", {"type": "json_object"})
step_data = json.loads(self.session.query(prompt))
content = step_data['content'] + "\n"
content = step_data[self.prompt_schema["content"]] + "\n"
yield self.output_streaming.get_chunk(0, content, self.session.llm_options)
max_steps = self.get_param("max_steps", 25)
for step_count in range(1, max_steps):
step_data = json.loads(self.session.proceed())
content = step_data['content'] + "\n"
content = step_data[self.prompt_schema["content"]] + "\n"
yield self.output_streaming.get_chunk(step_count, content, self.session.llm_options)
if step_data['next_action'] == 'final_answer':
if step_data[self.prompt_schema["next_action"]] == self.prompt_schema["action_final"]:
break

final_max_tokens = self.get_param("final_max_tokens", 1200)
self.session.update_options("max_tokens", final_max_tokens)
final_data = json.loads(self.session.query(final_prompt))
yield self.output_streaming.get_chunk(step_count + 1, final_data['content'], self.session.llm_options)
final_data = json.loads(self.session.query(self.prompt_schema["final_prompt"]))
yield self.output_streaming.get_chunk(step_count + 1, final_data[self.prompt_schema["content"]], self.session.llm_options)
yield self.output_streaming.done()


def query(self, prompt, max_steps: int = 25, thought_max_tokes:int = 300, final_max_tokens:int = 1200, final_prompt:str = FINAL_PROMPT):
def query(self, prompt):
thought_max_tokes = self.get_param("thought_max_tokes", 300)
self.session.update_options("max_tokens", thought_max_tokes)
self.session.update_options("response_format", {"type": "json_object"})
step_data = json.loads(self.session.query(prompt))
content = step_data['content'] + "\n"
content = step_data[self.prompt_schema["content"]] + "\n"
thoughts:str = content
max_steps = self.get_param("max_steps", 25)
for step_count in range(1, max_steps):
step_data = json.loads(self.session.proceed())
content = step_data['content'] + "\n"
content = step_data[self.prompt_schema["content"]] + "\n"
thoughts += content
if step_data['next_action'] == 'final_answer':
if step_data[self.prompt_schema["next_action"]] == self.prompt_schema["action_final"]:
break

final_max_tokens = self.get_param("final_max_tokens", 1200)
self.session.update_options("max_tokens", final_max_tokens)
final_data = json.loads(self.session.query(final_prompt))
return final_data['content'], thoughts
final_data = json.loads(self.session.query(self.prompt_schema["final_prompt"]))
return final_data[self.prompt_schema["content"]], thoughts

def last_message(self):
return self.session.memory.last_message

0 comments on commit 0206663

Please sign in to comment.