Litellm bug test
winternewt committed Jan 8, 2025
1 parent 54782fd commit 4eb2a3e
Showing 2 changed files with 64 additions and 9 deletions.
9 changes: 0 additions & 9 deletions core/just_agents/base_agent.py
@@ -227,15 +227,6 @@ def streaming_query_with_current_memory(self, reconstruct_chunks = False, **kwargs):
            else:
                yield self._protocol.sse_wrap(part.model_dump(mode='json'))

            # if self.tools and not self._tool_fuse_broken:
            #     tool_calls = self._protocol.tool_calls_from_message(msg)
            #     if tool_calls:
            #         self.add_to_memory(
            #             self._protocol.function_convention.reconstruct_tool_call_message(tool_calls)
            #         )
            #         self._process_function_calls(tool_calls)
            #         tool_messages.append(self._process_function_calls(tool_calls))

        if len(self._partial_streaming_chunks) > 0:
            assembly = self._protocol.response_from_deltas(self._partial_streaming_chunks)
            self._partial_streaming_chunks.clear()
64 changes: 64 additions & 0 deletions tests/test_pure_litellm.py
@@ -0,0 +1,64 @@
import json
import litellm
import pytest
from dotenv import load_dotenv



def get_current_weather(location: str):
    """
    Gets the current weather in a given location
    """
    if "tokyo" in location.lower():
        return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
    elif "san francisco" in location.lower():
        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
    elif "paris" in location.lower():
        return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
    else:
        return json.dumps({"location": location, "temperature": "unknown"})

@pytest.fixture(scope="module", autouse=True)
def load_env():
    """Load API keys from .env and build completion kwargs for both providers."""
    load_dotenv(override=True)
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant"},
        {"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}
    ]
    tools = [{"type": "function",
              "function": litellm.utils.function_to_dict(get_current_weather)}]
    OPENAI_GPT4oMINI = {
        "messages": messages,
        "model": "gpt-4o-mini",
        "temperature": 0.0,
        "tools": tools,
        "stream": True
    }
    LLAMA3_3 = {
        "messages": messages,
        "model": "groq/llama-3.3-70b-versatile",
        "api_base": "https://api.groq.com/openai/v1",
        "temperature": 0.0,
        "tools": tools,
        "stream": True
    }
    return OPENAI_GPT4oMINI, LLAMA3_3

def execute_completion(opts: dict):
    # Stream the completion, then rebuild the full response from the deltas;
    # the assembled message should carry one tool call per requested city.
    partial_streaming_chunks = []
    response_gen = litellm.completion(**opts)
    for part in response_gen:
        partial_streaming_chunks.append(part)
    assembly = litellm.stream_chunk_builder(partial_streaming_chunks)
    print(assembly.choices[0].message.tool_calls)
    assert len(assembly.choices[0].message.tool_calls) == 3, \
        assembly.choices[0].message.tool_calls[0].function.arguments

def test_oai_works(load_env):
    OPENAI_GPT4oMINI, _ = load_env
    execute_completion(OPENAI_GPT4oMINI)


@pytest.mark.skip(reason="until fixed in https://github.com/BerriAI/litellm/issues/7621")
def test_groq_bug(load_env):
    _, LLAMA3_3 = load_env
    execute_completion(LLAMA3_3)
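
For quick manual checking outside pytest, here is a minimal sketch of the same streamed tool-call reassembly. It is an illustration, not part of the commit; it assumes OPENAI_API_KEY is set in the environment and reuses get_current_weather and the prompt from the test above:

import litellm

# Stream a completion with the same tools and prompt as the test.
response_gen = litellm.completion(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful AI assistant"},
        {"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"},
    ],
    tools=[{"type": "function",
            "function": litellm.utils.function_to_dict(get_current_weather)}],
    temperature=0.0,
    stream=True,
)

# Collect the streamed deltas and merge them back into one response;
# a correct backend yields three assembled tool calls here.
chunks = list(response_gen)
assembly = litellm.stream_chunk_builder(chunks)
print(assembly.choices[0].message.tool_calls)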
