diff --git a/README.md b/README.md index ab6a5de..6167f85 100644 --- a/README.md +++ b/README.md @@ -140,7 +140,7 @@ def count_letters(character: str, word: str) -> str: # Initialize agent with tools and LLM options agent = ChainOfThoughtAgent( tools=[count_letters], - llm_options=llm_options.LLAMA3_2_VISION + llm_options=llm_options.LLAMA3_3 ) # Optional: Add callback to see all messages diff --git a/core/just_agents/llm_options.py b/core/just_agents/llm_options.py index e66e27d..e92d7b7 100644 --- a/core/just_agents/llm_options.py +++ b/core/just_agents/llm_options.py @@ -29,6 +29,20 @@ class LLMOptionsBase(ModelOptions, extra="allow"): "tools": [] } +LLAMA3_3: dict[str, Any] = { + "model": "groq/llama-3.3-70b-versatile", + "api_base": "https://api.groq.com/openai/v1", + "temperature": 0.0, + "tools": [] +} + +LLAMA3_3_specdec: dict[str, Any] = { + "model": "groq/llama-3.3-70b-specdec", + "api_base": "https://api.groq.com/openai/v1", + "temperature": 0.0, + "tools": [] +} + MISTRAL_8x22B = { "model": "mistral/open-mixtral-8x22b", diff --git a/core/just_agents/patterns/chain_of_throught.py b/core/just_agents/patterns/chain_of_throught.py index c7ced99..7d804b4 100644 --- a/core/just_agents/patterns/chain_of_throught.py +++ b/core/just_agents/patterns/chain_of_throught.py @@ -32,9 +32,11 @@ class ChainOfThoughtAgent(BaseAgent, IThinkingAgent[SupportedMessages, Supported You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer. - Respond in JSON format with 'title', 'content', and 'next_action' (either 'continue' or 'final_answer') keys. - Make sure you send only one JSON step object. You response should be a valid JSON object. In the JSON use Use Triple Quotes for Multi-line Strings. - USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. 
+ Respond in JSON format with "title", "content", and "next_action" (either "continue" or "final_answer") keys. + Make sure you send only one JSON step object AND NOTHING ELSE. + Your response should be a valid JSON object. + In the JSON use Triple Quotes for Multi-line Strings. + USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 2. BE AWARE OF YOUR LIMITATIONS AS AN LLM AND WHAT YOU CAN AND CANNOT DO. IN YOUR REASONING, INCLUDE EXPLORATION OF ALTERNATIVE ANSWERS. CONSIDER YOU MAY BE WRONG, AND IF YOU ARE WRONG IN YOUR REASONING, WHERE IT WOULD BE. @@ -43,12 +45,11 @@ class ChainOfThoughtAgent(BaseAgent, IThinkingAgent[SupportedMessages, Supported DO NOT JUST SAY YOU ARE RE-EXAMINING. USE AT LEAST 3 METHODS TO DERIVE THE ANSWER. USE BEST PRACTICES. Example of a valid JSON response: - ```json { "title": "Identifying Key Information", "content": "To begin solving this problem, we need to carefully examine the given information and identify the crucial elements that will guide our solution process. 
This involves...", "next_action": "continue" - }``` + } """ diff --git a/core/just_agents/patterns/interfaces/IThinkingAgent.py b/core/just_agents/patterns/interfaces/IThinkingAgent.py index 8eb4ce4..e82878d 100644 --- a/core/just_agents/patterns/interfaces/IThinkingAgent.py +++ b/core/just_agents/patterns/interfaces/IThinkingAgent.py @@ -31,6 +31,6 @@ def think(self, thought = self.thought_query(query, **kwargs) #queries itself with thought as expected output new_chain = [*current_chain, thought] #updates chain with the new thought if thought.is_final() or max_iter <= 0: - return (self.thought_query(query, **kwargs), new_chain) #returns the final thought and the chain that preceded it + return (thought, new_chain) #returns the final thought and the chain that preceded it else: return self.think(query, max_iter - 1, new_chain, **kwargs) #continues the thought process \ No newline at end of file diff --git a/examples/just_agents/examples/basic/agent_profiles.yaml b/examples/just_agents/examples/basic/agent_profiles.yaml index 6747ea9..c4aa3e9 100644 --- a/examples/just_agents/examples/basic/agent_profiles.yaml +++ b/examples/just_agents/examples/basic/agent_profiles.yaml @@ -3,7 +3,7 @@ agent_profiles: class_qualname: just_agents.patterns.chain_of_throught.ChainOfThoughtAgent llm_options: api_base: https://api.groq.com/openai/v1 - model: groq/llama-3.2-90b-vision-preview + model: groq/llama-3.3-70b-versatile temperature: 0.0 tool_choice: auto tools: [] @@ -35,7 +35,7 @@ agent_profiles: class_qualname: just_agents.base_agent.BaseAgent llm_options: api_base: https://api.groq.com/openai/v1 - model: groq/llama-3.2-90b-vision-preview + model: groq/llama-3.3-70b-versatile temperature: 0.0 tool_choice: auto tools: [] diff --git a/examples/just_agents/examples/basic/agent_serialization.py b/examples/just_agents/examples/basic/agent_serialization.py index 1156f92..dcdcfe9 100644 --- a/examples/just_agents/examples/basic/agent_serialization.py +++ 
b/examples/just_agents/examples/basic/agent_serialization.py @@ -20,7 +20,7 @@ config_path = basic_examples_dir / "agent_profiles.yaml" created_agent = BaseAgent( - llm_options=llm_options.LLAMA3_2_VISION, + llm_options=llm_options.LLAMA3_3, config_path=config_path, tools=[mock_get_current_weather] ) diff --git a/examples/just_agents/examples/basic/chain_of_thought_example.py b/examples/just_agents/examples/basic/chain_of_thought_example.py index a448f01..eb0ea2b 100644 --- a/examples/just_agents/examples/basic/chain_of_thought_example.py +++ b/examples/just_agents/examples/basic/chain_of_thought_example.py @@ -25,7 +25,7 @@ # - LLAMA 3.2 Vision as the language model agent: ChainOfThoughtAgent = ChainOfThoughtAgent( # type: ignore tools=tools, - llm_options=llm_options.LLAMA3_2_VISION + llm_options=llm_options.LLAMA3_3 ) # Add a callback to print all messages that the agent processes diff --git a/examples/just_agents/examples/basic/function_calling_example.py b/examples/just_agents/examples/basic/function_calling_example.py index 63cf746..4125672 100644 --- a/examples/just_agents/examples/basic/function_calling_example.py +++ b/examples/just_agents/examples/basic/function_calling_example.py @@ -34,7 +34,7 @@ def get_current_weather(location: str): # 1. LLAMA3_2_VISION as the language model # 2. 
get_current_weather function as an available tool agent = BaseAgent( # type: ignore - llm_options=llm_options.LLAMA3_2_VISION, + llm_options=llm_options.LLAMA3_3, tools=[get_current_weather] ) diff --git a/examples/notebooks/01_just_agents_colab.ipynb b/examples/notebooks/01_just_agents_colab.ipynb index fcd498c..273b5e8 100644 --- a/examples/notebooks/01_just_agents_colab.ipynb +++ b/examples/notebooks/01_just_agents_colab.ipynb @@ -322,7 +322,7 @@ "source": [ "from just_agents import llm_options\n", "\n", - "llm_options: dict = llm_options.LLAMA3_2_VISION\n", + "llm_options: dict = llm_options.LLAMA3_3\n", "\n", "\"\"\"\n", "llm_options here are just dictionary of the following form:\n", @@ -546,7 +546,7 @@ " return \"Weather is snowy, there were numerous frozen lakes\"\n", "\n", "session: BaseAgent = BaseAgent(\n", - " llm_options=llm_options.LLAMA3_2_VISION,\n", + " llm_options=llm_options.LLAMA3_3,\n", " tools=[get_weather_by_city]\n", " #if you rename the function, then do not forget to change here\n", ")\n", @@ -732,7 +732,7 @@ "from just_agents import llm_options\n", "\n", "session: BaseAgent = BaseAgent(\n", - " llm_options=llm_options.LLAMA3_2_VISION,\n", + " llm_options=llm_options.LLAMA3_3,\n", " tools=[get_weather_by_city]\n", " #if you rename the function, then do not forget to change here\n", ")\n", @@ -952,7 +952,7 @@ "source": [ "from just_agents import llm_options\n", "\n", - "kamala_options = llm_options.LLAMA3_2_VISION\n", + "kamala_options = llm_options.LLAMA3_3\n", "trump_options: dict = {\n", " \"model\": \"groq/mixtral-8x7b-32768\",\n", " \"api_base\": \"https://api.groq.com/openai/v1\",\n", diff --git a/pyproject.toml b/pyproject.toml index 4207596..009b416 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "just-agents" -version = "0.4.2" +version = "0.4.3" description = "Just Agents - A lightweight, straightforward library for LLM agents that focuses on simplicity over unnecessary abstractions." 
authors = [ "Alex Karmazin ", @@ -27,12 +27,12 @@ just-agents-router = { path = "router", develop = true } just-agents-examples = { path = "examples", develop = true } [tool.poetry.group.publish.dependencies] -just-agents-core = "0.4.2" -just-agents-tools = "0.4.2" -just-agents-coding = "0.4.2" -just-agents-web = "0.4.2" -just-agents-router = "0.4.2" -just-agents-examples = "0.4.2" +just-agents-core = "0.4.3" +just-agents-tools = "0.4.3" +just-agents-coding = "0.4.3" +just-agents-web = "0.4.3" +just-agents-router = "0.4.3" +just-agents-examples = "0.4.3" [tool.poetry.group.dev.dependencies] pytest = "^7.4.4" diff --git a/tests/test_chain_of_thought.py b/tests/test_chain_of_thought.py index aea8b88..808873d 100644 --- a/tests/test_chain_of_thought.py +++ b/tests/test_chain_of_thought.py @@ -15,7 +15,7 @@ def count_letters(character:str, word:str) -> str: def test_function_query(): load_dotenv(override = True) - llm_options = just_agents.llm_options.OPENAI_GPT4oMINI.copy() + llm_options = just_agents.llm_options.LLAMA3_3.copy() agent: ChainOfThoughtAgent = ChainOfThoughtAgent(llm_options=llm_options, tools=[count_letters]) result, thoughts = agent.think("Count the number of occurrences of the letter 'L' in the word - 'LOLLAPALOOZA'.") print("Thoughts: ", thoughts) diff --git a/tests/test_session.py b/tests/test_session.py index 9f41011..24621b3 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -25,7 +25,7 @@ def get_current_weather(location: str): def test_sync_function_calling(): load_dotenv(override=True) session: LLMSession = LLMSession( - llm_options=llm_options.LLAMA3_2_VISION, + llm_options=llm_options.LLAMA3_3, tools=[get_current_weather] ) result = session.query("What's the weather like in San Francisco, Tokyo, and Paris?") @@ -40,7 +40,7 @@ async def process_stream(async_generator): def test_stream_function_calling(): load_dotenv(override=True) session: LLMSession = LLMSession( - llm_options=llm_options.LLAMA3_2_VISION, + 
llm_options=llm_options.LLAMA3_3, tools=[get_current_weather] ) stream = session.stream("What's the weather like in San Francisco, Tokyo, and Paris?") @@ -55,7 +55,7 @@ def test_stream_function_calling(): def test_stream_genetics_function_calling(): load_dotenv(override=True) session: LLMSession = LLMSession( - llm_options=llm_options.LLAMA3_2_VISION, + llm_options=llm_options.LLAMA3_3, tools=[hybrid_search, rsid_lookup, gene_lookup, pathway_lookup, disease_lookup, sequencing_info, clinical_trails_full_trial] ) stream = session.stream("What is the influence of different alleles in rs10937739?")