Skip to content

Commit

Permalink
LLAMA3.3 support
Browse files Browse the repository at this point in the history
  • Loading branch information
Anton Kulaga committed Dec 7, 2024
1 parent fc43d8b commit 05aea44
Show file tree
Hide file tree
Showing 12 changed files with 42 additions and 27 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def count_letters(character: str, word: str) -> str:
# Initialize agent with tools and LLM options
agent = ChainOfThoughtAgent(
tools=[count_letters],
llm_options=llm_options.LLAMA3_2_VISION
llm_options=llm_options.LLAMA3_3
)

# Optional: Add callback to see all messages
Expand Down
14 changes: 14 additions & 0 deletions core/just_agents/llm_options.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,20 @@ class LLMOptionsBase(ModelOptions, extra="allow"):
"tools": []
}

# Groq-hosted Llama 3.3 70B (versatile variant), OpenAI-compatible endpoint.
# temperature 0.0 → deterministic completions; "tools" starts empty for callers to fill.
# NOTE(review): this is a shared module-level dict containing a mutable list —
# callers that mutate it (e.g. appending tools) should take a .copy() first,
# otherwise state leaks between agents/tests that reuse the constant.
LLAMA3_3: dict[str, Any] = {
    "model": "groq/llama-3.3-70b-versatile",
    "api_base": "https://api.groq.com/openai/v1",
    "temperature": 0.0,
    "tools": []
}

# Groq-hosted Llama 3.3 70B speculative-decoding variant (lower latency tier).
# NOTE(review): name deviates from the module's UPPER_SNAKE_CASE constant
# convention (cf. LLAMA3_3 above) — consider LLAMA3_3_SPECDEC with an alias
# kept for backward compatibility.
# NOTE(review): shared module-level dict with a mutable "tools" list — callers
# should .copy() before mutating (same hazard as LLAMA3_3).
LLAMA3_3_specdec: dict[str, Any] = {
    "model": "groq/llama-3.3-70b-specdec",
    "api_base": "https://api.groq.com/openai/v1",
    "temperature": 0.0,
    "tools": []
}


MISTRAL_8x22B = {
"model": "mistral/open-mixtral-8x22b",
Expand Down
11 changes: 6 additions & 5 deletions core/just_agents/patterns/chain_of_throught.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,11 @@ class ChainOfThoughtAgent(BaseAgent, IThinkingAgent[SupportedMessages, Supported
You are an expert AI assistant that explains your reasoning step by step.
For each step, provide a title that describes what you're doing in that step, along with the content.
Decide if you need another step or if you're ready to give the final answer.
Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys.
Make sure you send only one JSON step object. You response should be a valid JSON object. In the JSON use Use Triple Quotes for Multi-line Strings.
USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3.
Respond in JSON format with "title", "content", and "next_action" (either "continue" or "final_answer") keys.
Make sure you send only one JSON step object AND NOTHING ELSE.
Your response should be a valid JSON object.
In the JSON, use Triple Quotes for Multi-line Strings.
USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 2.
BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO.
IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS.
CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE.
Expand All @@ -43,12 +45,11 @@ class ChainOfThoughtAgent(BaseAgent, IThinkingAgent[SupportedMessages, Supported
DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES.
Example of a valid JSON response:
```json
{
"title": "Identifying Key Information",
"content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. This involves...",
"next_action": "continue"
}```
}
"""


Expand Down
2 changes: 1 addition & 1 deletion core/just_agents/patterns/interfaces/IThinkingAgent.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,6 @@ def think(self,
thought = self.thought_query(query, **kwargs) #queries itself with thought as expected output
new_chain = [*current_chain, thought] #updates chain with the new thought
if thought.is_final() or max_iter <= 0:
return (self.thought_query(query, **kwargs), new_chain) #returns the final thought and the chain that preceded it
return (thought, new_chain) #returns the final thought and the chain that preceded it
else:
return self.think(query, max_iter - 1, new_chain, **kwargs) #continues the thought process
4 changes: 2 additions & 2 deletions examples/just_agents/examples/basic/agent_profiles.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ agent_profiles:
class_qualname: just_agents.patterns.chain_of_throught.ChainOfThoughtAgent
llm_options:
api_base: https://api.groq.com/openai/v1
model: groq/llama-3.2-90b-vision-preview
model: groq/llama-3.3-70b-versatile
temperature: 0.0
tool_choice: auto
tools: []
Expand Down Expand Up @@ -35,7 +35,7 @@ agent_profiles:
class_qualname: just_agents.base_agent.BaseAgent
llm_options:
api_base: https://api.groq.com/openai/v1
model: groq/llama-3.2-90b-vision-preview
model: groq/llama-3.3-70b-versatile
temperature: 0.0
tool_choice: auto
tools: []
Expand Down
2 changes: 1 addition & 1 deletion examples/just_agents/examples/basic/agent_serialization.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
config_path = basic_examples_dir / "agent_profiles.yaml"

created_agent = BaseAgent(
llm_options=llm_options.LLAMA3_2_VISION,
llm_options=llm_options.LLAMA3_3,
config_path=config_path,
tools=[mock_get_current_weather]
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
# - LLAMA 3.3 as the language model
agent: ChainOfThoughtAgent = ChainOfThoughtAgent( # type: ignore
tools=tools,
llm_options=llm_options.LLAMA3_2_VISION
llm_options=llm_options.LLAMA3_3
)

# Add a callback to print all messages that the agent processes
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def get_current_weather(location: str):
# 1. LLAMA3_3 as the language model
# 2. get_current_weather function as an available tool
agent = BaseAgent( # type: ignore
llm_options=llm_options.LLAMA3_2_VISION,
llm_options=llm_options.LLAMA3_3,
tools=[get_current_weather]
)

Expand Down
8 changes: 4 additions & 4 deletions examples/notebooks/01_just_agents_colab.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@
"source": [
"from just_agents import llm_options\n",
"\n",
"llm_options: dict = llm_options.LLAMA3_2_VISION\n",
"llm_options: dict = llm_options.LLAMA3_3\n",
"\n",
"\"\"\"\n",
"llm_options here are just dictionary of the following form:\n",
Expand Down Expand Up @@ -546,7 +546,7 @@
" return \"Weather is snowy, there were numerous frozen lakes\"\n",
"\n",
"session: BaseAgent = BaseAgent(\n",
" llm_options=llm_options.LLAMA3_2_VISION,\n",
" llm_options=llm_options.LLAMA3_3,\n",
" tools=[get_weather_by_city]\n",
" #if you rename the function, then do not forget to change here\n",
")\n",
Expand Down Expand Up @@ -732,7 +732,7 @@
"from just_agents import llm_options\n",
"\n",
"session: BaseAgent = BaseAgent(\n",
" llm_options=llm_options.LLAMA3_2_VISION,\n",
" llm_options=llm_options.LLAMA3_3,\n",
" tools=[get_weather_by_city]\n",
" #if you rename the function, then do not forget to change here\n",
")\n",
Expand Down Expand Up @@ -952,7 +952,7 @@
"source": [
"from just_agents import llm_options\n",
"\n",
"kamala_options = llm_options.LLAMA3_2_VISION\n",
"kamala_options = llm_options.LLAMA3_3\n",
"trump_options: dict = {\n",
" \"model\": \"groq/mixtral-8x7b-32768\",\n",
" \"api_base\": \"https://api.groq.com/openai/v1\",\n",
Expand Down
14 changes: 7 additions & 7 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "just-agents"
version = "0.4.2"
version = "0.4.3"
description = "Just Agents - A lightweight, straightforward library for LLM agents that focuses on simplicity over unnecessary abstractions."
authors = [
"Alex Karmazin <karmazinalex@gmail.com>",
Expand All @@ -27,12 +27,12 @@ just-agents-router = { path = "router", develop = true }
just-agents-examples = { path = "examples", develop = true }

[tool.poetry.group.publish.dependencies]
just-agents-core = "0.4.2"
just-agents-tools = "0.4.2"
just-agents-coding = "0.4.2"
just-agents-web = "0.4.2"
just-agents-router = "0.4.2"
just-agents-examples = "0.4.2"
just-agents-core = "0.4.3"
just-agents-tools = "0.4.3"
just-agents-coding = "0.4.3"
just-agents-web = "0.4.3"
just-agents-router = "0.4.3"
just-agents-examples = "0.4.3"

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.4"
Expand Down
2 changes: 1 addition & 1 deletion tests/test_chain_of_thought.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def count_letters(character:str, word:str) -> str:
def test_function_query():
load_dotenv(override = True)

llm_options = just_agents.llm_options.OPENAI_GPT4oMINI.copy()
llm_options = just_agents.llm_options.LLAMA3_3
agent: ChainOfThoughtAgent = ChainOfThoughtAgent(llm_options=llm_options, tools=[count_letters])
result, thoughts = agent.think("Count the number of occurrences of the letter 'L' in the word - 'LOLLAPALOOZA'.")
print("Thoughts: ", thoughts)
Expand Down
6 changes: 3 additions & 3 deletions tests/test_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def get_current_weather(location: str):
def test_sync_function_calling():
load_dotenv(override=True)
session: LLMSession = LLMSession(
llm_options=llm_options.LLAMA3_2_VISION,
llm_options=llm_options.LLAMA3_3,
tools=[get_current_weather]
)
result = session.query("What's the weather like in San Francisco, Tokyo, and Paris?")
Expand All @@ -40,7 +40,7 @@ async def process_stream(async_generator):
def test_stream_function_calling():
load_dotenv(override=True)
session: LLMSession = LLMSession(
llm_options=llm_options.LLAMA3_2_VISION,
llm_options=llm_options.LLAMA3_3,
tools=[get_current_weather]
)
stream = session.stream("What's the weather like in San Francisco, Tokyo, and Paris?")
Expand All @@ -55,7 +55,7 @@ def test_stream_function_calling():
def test_stream_genetics_function_calling():
load_dotenv(override=True)
session: LLMSession = LLMSession(
llm_options=llm_options.LLAMA3_2_VISION,
llm_options=llm_options.LLAMA3_3,
tools=[hybrid_search, rsid_lookup, gene_lookup, pathway_lookup, disease_lookup, sequencing_info, clinical_trails_full_trial]
)
stream = session.stream("What is the influence of different alleles in rs10937739?")
Expand Down

0 comments on commit 05aea44

Please sign in to comment.