Skip to content

Commit

Permalink
chore: add openai test script
Browse files Browse the repository at this point in the history
  • Loading branch information
naaive committed Dec 4, 2024
1 parent 3ee834a commit aeaa43e
Show file tree
Hide file tree
Showing 3 changed files with 37 additions and 10 deletions.
20 changes: 10 additions & 10 deletions openagent/router/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,26 +168,26 @@ async def create_chat_completion(request: ChatCompletionRequest):
agent = build_workflow(llm)

combined_message = "\n".join([f"{msg.role}: {msg.content}" for msg in request.messages])

tool_calls = []
assistant_message = None

async for event in agent.astream_events(
{"messages": [HumanMessage(content=combined_message)]},
version="v1"
{"messages": [HumanMessage(content=combined_message)]},
version="v1"
):
if event["event"] == "on_tool_end":
tool_name = event["name"]
tool_input = event["data"]["input"]

tool_call = ToolCall(
function={
"name": tool_name,
"arguments": json.dumps(tool_input)
}
)
tool_calls.append(tool_call)

elif event["event"] == "on_chat_model_stream":
if isinstance(event["data"]["chunk"].content, str):
assistant_message = (assistant_message or "") + event["data"]["chunk"].content
Expand All @@ -199,7 +199,7 @@ async def create_chat_completion(request: ChatCompletionRequest):
choice = ChatChoice(
index=0,
message=ChatMessage(
role="assistant",
role="assistant",
content=assistant_message,
tool_calls=tool_calls if tool_calls else None
),
Expand All @@ -209,7 +209,7 @@ async def create_chat_completion(request: ChatCompletionRequest):
# Estimate token usage
prompt_tokens = sum(len(msg.content.split()) * 1.3 for msg in request.messages)
completion_tokens = len(assistant_message.split()) * 1.3 if assistant_message else 0

usage = Usage(
prompt_tokens=int(prompt_tokens),
completion_tokens=int(completion_tokens),
Expand Down Expand Up @@ -244,7 +244,7 @@ async def stream_chat_completion(request: ChatCompletionRequest):
model=request.model,
choices=[StreamChoice(
index=0,
delta=DeltaMessage(role="assistant"),
delta=DeltaMessage(role="assistant", content=""),
)]
)
yield f"data: {chunk.json()}\n\n"
Expand All @@ -270,15 +270,15 @@ async def stream_chat_completion(request: ChatCompletionRequest):
# Handle tool responses
tool_name = event["name"]
tool_input = event["data"]["input"]

# Create a tool call response
tool_call = ToolCall(
function={
"name": tool_name,
"arguments": json.dumps(tool_input)
}
)

chunk = ChatCompletionStreamResponse(
model=request.model,
choices=[StreamChoice(
Expand Down
Empty file added tests/openai/__init__.py
Empty file.
27 changes: 27 additions & 0 deletions tests/openai/example.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Example requests against the local OpenAI-compatible chat completions endpoint.

# 1) Non-streaming request: single JSON response body.
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "messages": [
      {
        "role": "user",
        "content": "Hello!"
      }
    ]
  }'


# 2) Streaming request: server-sent events ("data: ..." chunks).
# NOTE: "stream" must be a JSON boolean (true), not the string "true" —
# the OpenAI API defines it as boolean, and strict servers reject the string form.
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "stream": true,
    "messages": [
      {
        "role": "user",
        "content": "Hello!"
      }
    ]
  }'

0 comments on commit aeaa43e

Please sign in to comment.