Skip to content

Commit

Permalink
Added descriptions to YAML files and a little refactoring.
Browse files Browse the repository at this point in the history
  • Loading branch information
Alex-Karmazin committed Oct 20, 2024
1 parent accc506 commit 8b436fa
Show file tree
Hide file tree
Showing 4 changed files with 33 additions and 63 deletions.
7 changes: 2 additions & 5 deletions examples/yaml_initialization_example.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,5 @@
class: "LLMSession"
just_streaming_method: "openai"
system_prompt: "You are helpfull assistant that could count letters in word. To acomplish this use tool letter_count()"
completion_remove_key_on_error: True
completion_max_tries: 2
system_prompt: "You are helpful assistant that could count letters in word.
To acomplish this use tool letter_count()"
options:
model: "groq/llama-3.1-70b-versatile"
api_base: "https://api.groq.com/openai/v1"
Expand Down
38 changes: 19 additions & 19 deletions just_agents/config/cot_agent_prompt.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
class: "ChainOfThoughtAgent"
class: "ChainOfThoughtAgent" # class name to create could be LLMSession, ChainOfThoughtAgent. Default is LLMSession
system_prompt_path: # path to a file containing the system prompt; mutually exclusive with system_prompt
system_prompt: "You are an expert AI assistant that explains your reasoning step by step.
For each step, provide a title that describes what you're doing in that step, along with the content.
Decide if you need another step or if you're ready to give the final answer.
Expand All @@ -19,23 +20,22 @@ system_prompt: "You are an expert AI assistant that explains your reasoning step
'content': 'To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...',
'next_action': 'continue'
}```
"
system_prompt_path:
final_prompt: "Please provide the final answer based solely on your reasoning above."
title: "title"
content: "content"
next_action: "next_action"
action_continue: "continue"
action_final: "final_answer"
thought_max_tokes: 300
max_steps: 25
final_max_tokens: 1200
tools:
" # system prompt text; mutually exclusive with system_prompt_path
final_prompt: "Please provide the final answer based solely on your reasoning above." # prompt that will be passed before final answer
title: "title" # title parameter in system prompt
content: "content" # content parameter in system prompt
next_action: "next_action" # next_action parameter in system prompt
action_continue: "continue" # value of next_action indicating that the next answer will not be final
action_final: "final_answer" # value of next_action indicating that the next answer will be final
thought_max_tokes: 300 # maximum length of thought
final_max_tokens: 1200 # maximum length of final answer
max_steps: 25 # maximum number of thought steps
tools: # list of functions that will be used as tools, each record should contain package and function name
# - package:
# function:
llm_session:
just_streaming_method: "openai"
completion_remove_key_on_error: True
completion_max_tries: 2
backup_options:
key_list_path:
llm_session: # initialization parameters for inner object LLMSession
just_streaming_method: "openai" # protocol to handle llm format for function calling
completion_remove_key_on_error: True # when using a list of API keys, remove a key from the list after a call with it fails
completion_max_tries: 2 # maximum number of completion retries before giving up
backup_options: # options used after the main options are exhausted; one more completion call is made with these backup options
key_list_path: # path to text file with list of api keys, one key per line
18 changes: 9 additions & 9 deletions just_agents/config/llm_session_schema.yaml
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
class: "LLMSession"
just_streaming_method: "openai"
system_prompt_path:
system_prompt:
completion_remove_key_on_error: True
completion_max_tries: 2
backup_options:
key_list_path:
tools:
class: "LLMSession" # class name to create could be LLMSession, ChainOfThoughtAgent. Default is LLMSession
just_streaming_method: "openai" # protocol to handle llm format for function calling
system_prompt_path: # path to a file containing the system prompt; mutually exclusive with system_prompt
system_prompt: # system prompt text; mutually exclusive with system_prompt_path
completion_remove_key_on_error: True # when using a list of API keys, remove a key from the list after a call with it fails
completion_max_tries: 2 # maximum number of completion retries before giving up
backup_options: # options used after the main options are exhausted; one more completion call is made with these backup options
key_list_path: # path to text file with list of api keys, one key per line
tools: # list of functions that will be used as tools, each record should contain package and function name
# - package:
# function:
33 changes: 3 additions & 30 deletions just_agents/interfaces/IAgent.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,41 +7,14 @@ def build_agent(agent_schema: str | Path | dict):
from just_agents.llm_session import LLMSession
agent_schema = resolve_agent_schema(agent_schema)
class_name = agent_schema.get("class", None)
if class_name is None:
raise ValueError("Error class_name field should not be empty in agent_schema param during IAgent.build() call.")
elif class_name == "LLMSession":
if class_name is None or class_name == "LLMSession":
return LLMSession(agent_schema=agent_schema)
elif class_name == "ChainOfThoughtAgent":
return ChainOfThoughtAgent(agent_schema=agent_schema)

class IAgent:

# @staticmethod
# def build(agent_schema: dict):
# import importlib
# try:
# package_name = agent_schema.get("package", None)
# class_name = agent_schema.get("class", None)
#
# if package_name is None:
# raise ValueError("Error package_name field should not be empty in agent_schema param during IAgent.build() call.")
# if class_name is None:
# raise ValueError("Error class_name field should not be empty in agent_schema param during IAgent.build() call.")
# # Dynamically import the package
# package = importlib.import_module(package_name)
# # Get the class from the package
# class_ = getattr(package, class_name)
# # Create an instance of the class
# instance = class_(agent_schema=agent_schema)
#
# return instance
# except (ImportError, AttributeError) as e:
# print(f"Error creating instance of {class_name} from {package_name}: {e}")
# return None


def stream(self, input: str | dict | list[dict]) -> AsyncGenerator[Any, None]:
raise NotImplementedError("You need to impelement stream_add_all() first!")
raise NotImplementedError("You need to impelement stream() first!")

def query(self, input: str | dict | list[dict]) -> str:
raise NotImplementedError("You need to impelement query_add_all() first!")
raise NotImplementedError("You need to impelement query() first!")

0 comments on commit 8b436fa

Please sign in to comment.