
Commit

found some bugs, also commented place for protocol bugs, updated just-agents web a bit
antonkulaga committed Jan 7, 2025
1 parent 7a03773 commit 343924a
Showing 15 changed files with 2,833 additions and 98 deletions.
29 changes: 29 additions & 0 deletions config/agent_profiles.yaml
@@ -1,3 +1,32 @@
SecretaryAgent:
autoload_from_yaml: false
backstory: Developed to assist in the analysis and description of AI agents.
class_qualname: just_agents.router.secretary_agent.SecretaryAgent
description: A skilled AI assistant focused on creating concise and informative
profiles for AI agents.
expertise_domain: AI analysis and profiling
extra_dict:
personality_traits: Agent's personality traits go here
goal: To provide accurate and detailed descriptions of AI agents.
knowledge_sources: []
limitations: May not have access to real-time data or external databases.
llm_options:
model: gpt-4o-mini
temperature: 0.0
model_name: gpt-4o-mini
personality_traits: A skilled, detail-oriented, and concise AI assistant focused
on analysis and description of AI agents.
role: AI Assistant
system_prompt: |2-
You are a skilled AI assistant specializing in analysis and description of AI agents.
You are tasked with generation of a minimalistic and concise yet detail-rich profile for an AI agent, based on the AVAILABLE_ATTRIBUTES,
including 'system_prompt', 'llm_options' and any other. Your task is to fill in values of a JSON-formatted profile
that matches the PROFILE_UPDATE_TEMPLATE provided below. Values of the template describe what output is expected for each field.
Only populate fields based on the well-established information, don't make up anything.
Double-check that the output contains only a valid JSON with all the fields specified in PROFILE_UPDATE_TEMPLATE.
Never include any additional text or explanations in your reply.
task: Generate detailed profiles for AI agents based on provided attributes.
agent_profiles:
SecretaryAgent:
autoload_from_yaml: false
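A minimal sketch (not part of the commit) of reading this profile back, assuming the from_yaml signature shown in just_serialization.py further down keeps its section_name, parent_section and file_path parameters, and that the new top-level SecretaryAgent entry is the one to load:

# Sketch only: load the SecretaryAgent profile added above.
# Assumes JustAgentProfile.from_yaml accepts the (section_name,
# parent_section, file_path) keywords visible later in this commit.
from pathlib import Path
from just_agents.just_profile import JustAgentProfile

profile = JustAgentProfile.from_yaml(
    section_name="SecretaryAgent",
    parent_section=None,  # the entry above sits at the top level of the file
    file_path=Path("./config/agent_profiles.yaml"),
)
print(profile.role, profile.llm_options)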
3 changes: 2 additions & 1 deletion core/just_agents/base_agent.py
@@ -137,7 +137,7 @@ def _prepare_options(self, options: LLMOptions):
opt["tools"] = [{"type": "function",
"function": self.tools[tool].get_litellm_description()} for tool in self.tools]
return opt

def _execute_completion(
self,
stream: bool,
@@ -176,6 +176,7 @@ def _execute_completion(
return self._protocol.completion(messages=self.memory.messages, stream=stream, **opt)



def _process_function_calls(self, function_calls: List[IFunctionCall[SupportedMessages]]) -> SupportedMessages:
messages: SupportedMessages = []
for call in function_calls:
9 changes: 5 additions & 4 deletions core/just_agents/interfaces/protocol_adapter.py
@@ -5,13 +5,14 @@
from just_agents.interfaces.function_call import IFunctionCall

BaseModelResponse = TypeVar('BaseModelResponse', bound=BaseModel)
BaseModelStreamWrapper = TypeVar('BaseModelStreamWrapper', bound=BaseModel)
AbstractMessage = TypeVar("AbstractMessage")

ModelResponseCallback=Callable[...,BaseModelResponse]
MessageUnpackCallback=Callable[[BaseModelResponse], AbstractMessage]
ExecuteToolCallback=Callable[[Sequence[IFunctionCall]],List[AbstractMessage]]

class IProtocolAdapter(IAbstractStreamingProtocol, ABC, Generic[BaseModelResponse, AbstractMessage]):
class IProtocolAdapter(IAbstractStreamingProtocol, ABC, Generic[BaseModelResponse, AbstractMessage, BaseModelStreamWrapper]):
"""
Class that is required to wrap the model protocol
"""
@@ -32,19 +33,19 @@ def message_from_response(self, response: BaseModelResponse) -> AbstractMessage:
raise NotImplementedError("You need to implement message_from_response first!")

@abstractmethod
def message_from_delta(self, response: BaseModelResponse) -> AbstractMessage:
def message_from_delta(self, response: BaseModelStreamWrapper) -> AbstractMessage:
raise NotImplementedError("You need to implement message_from_delta first!")

@abstractmethod
def content_from_delta(self, delta: AbstractMessage) -> str:
def content_from_delta(self, delta: BaseModelStreamWrapper) -> str:
raise NotImplementedError("You need to implement content_from_delta first!")

@abstractmethod
def tool_calls_from_message(self, message: AbstractMessage) -> List[IFunctionCall[AbstractMessage]]:
raise NotImplementedError("You need to implement tool_calls_from_response first!")

@abstractmethod
def response_from_deltas(self, deltas: List[BaseModelResponse]) -> BaseModelResponse:
def response_from_deltas(self, deltas: List[BaseModelStreamWrapper]) -> BaseModelResponse:
raise NotImplementedError("You need to implement message_from_deltas first!")

def get_chunk(self, index:int, delta:str, options:dict) -> BaseModelResponse:
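The new third type parameter lets streaming deltas carry their own type instead of reusing BaseModelResponse. A rough sketch (not repository code; the stub types are hypothetical and the remaining abstract members are omitted) of how a concrete adapter lines up with the three generics:

# Hypothetical stand-in types, for illustration only.
from typing import List
from pydantic import BaseModel
from just_agents.interfaces.protocol_adapter import IProtocolAdapter

class FakeResponse(BaseModel):      # plays the role of BaseModelResponse
    text: str

class FakeStreamChunk(BaseModel):   # plays the role of BaseModelStreamWrapper
    delta: str

class FakeAdapter(IProtocolAdapter[FakeResponse, dict, FakeStreamChunk]):
    # Remaining abstract members (completion, tool handling, ...) omitted.
    def message_from_response(self, response: FakeResponse) -> dict:
        return {"role": "assistant", "content": response.text}

    def message_from_delta(self, response: FakeStreamChunk) -> dict:
        return {"role": "assistant", "content": response.delta}

    def content_from_delta(self, delta: FakeStreamChunk) -> str:
        return delta.delta

    def response_from_deltas(self, deltas: List[FakeStreamChunk]) -> FakeResponse:
        return FakeResponse(text="".join(d.delta for d in deltas))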
2 changes: 1 addition & 1 deletion core/just_agents/just_profile.py
@@ -11,7 +11,7 @@ class JustAgentProfile(JustSerializable):
A Pydantic model representing an agent profile
"""
DEFAULT_GENERIC_PROMPT: ClassVar[str] = "You are a helpful AI assistant"
DEFAULT_PARENT_SECTION: ClassVar[str] = 'agent_profiles'
DEFAULT_PARENT_SECTION: ClassVar[str] = None#'agent_profiles'
DEFAULT_CONFIG_PATH: ClassVar[Path] = Path('./config/agent_profiles.yaml')
config_parent_section: Optional[str] = Field(DEFAULT_PARENT_SECTION, exclude=True)

4 changes: 3 additions & 1 deletion core/just_agents/just_serialization.py
@@ -169,7 +169,7 @@ class JustSerializable(BaseModel, extra="allow", use_enum_values=True, validate_
"""
DEFAULT_CONFIG_PATH : ClassVar[Path] = Path('./config/default_config.yaml')
DEFAULT_PARENT_SECTION : ClassVar[Optional[str]] = None
DEFAULT_SECTION_NAME : ClassVar[Optional[str]] = 'RenameMe'
DEFAULT_SECTION_NAME : ClassVar[Optional[str]] = "Agent" #'RenameMe'
#MODULE_DIR : ClassVar[Path] = Path(os.path.abspath(os.path.dirname(__file__)))

config_path : Optional[Path] = Field(None,exclude=True)
@@ -327,6 +327,8 @@ def from_yaml(cls, section_name: str,
file_path = cls.DEFAULT_CONFIG_PATH
if parent_section is None:
parent_section = cls.DEFAULT_PARENT_SECTION
if section_name is None:
section_name = cls.DEFAULT_SECTION_NAME
section_data = JustYaml.read_yaml_data(
file_path,
section_name,
15 changes: 10 additions & 5 deletions core/just_agents/protocols/litellm_protocol.py
@@ -1,4 +1,5 @@
import json
import pprint

from litellm import ModelResponse, CustomStreamWrapper, completion, acompletion, stream_chunk_builder
from typing import Optional, Union, Coroutine, ClassVar, Type, Sequence, List, Any, AsyncGenerator
@@ -10,7 +11,7 @@
from just_agents.interfaces.protocol_adapter import IProtocolAdapter, ExecuteToolCallback
from just_agents.interfaces.streaming_protocol import IAbstractStreamingProtocol
from just_agents.protocols.openai_streaming import OpenaiStreamingProtocol

from litellm.types.utils import StreamingChoices, Delta
#from openai.types import CompletionUsage
#from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionToolMessageParam,ChatCompletionFunctionMessageParam
#from openai.types.chat.chat_completion import ChatCompletion, Choice, ChatCompletionMessage
@@ -68,7 +69,7 @@ def reconstruct_tool_call_message(calls: Sequence['LiteLLMFunctionCall']) -> dic
return {"role": "assistant", "content": None, "tool_calls": tool_calls}


class LiteLLMAdapter(BaseModel, IProtocolAdapter[ModelResponse,MessageDict]):
class LiteLLMAdapter(BaseModel, IProtocolAdapter[ModelResponse,MessageDict, CustomStreamWrapper]):
#Class that describes function convention
function_convention: ClassVar[Type[IFunctionCall[MessageDict]]] = LiteLLMFunctionCall
#hooks to agent class
@@ -78,12 +79,14 @@ class LiteLLMAdapter(BaseModel, IProtocolAdapter[ModelResponse,MessageDict]):
def model_post_init(self, __context: Any) -> None:
super().model_post_init(__context)

def completion(self, *args, **kwargs) -> ModelResponse:
def completion(self, *args, **kwargs) -> Union[ModelResponse, CustomStreamWrapper]: # for the stream it is CustomStreamWrapper
return completion(*args, **kwargs)

async def async_completion(self, *args, **kwargs) \
-> Coroutine[Any, Any, Union[ModelResponse, CustomStreamWrapper, AsyncGenerator]]:
return acompletion(*args, **kwargs)

# TODO: what about https://docs.litellm.ai/docs/providers/custom_llm_server ?

def message_from_response(self, response: ModelResponse) -> MessageDict:
message = response.choices[0].message.model_dump(
@@ -98,7 +101,8 @@ def message_from_response(self, response: ModelResponse) -> MessageDict:
assert "function_call" not in message
return message

def message_from_delta(self, response: ModelResponse) -> MessageDict:
# TODO: wrong old stuff, YOU DO NOT GET A RESPONSE BUT YOU GET CustomStreamWrapper
def message_from_delta(self, response: CustomStreamWrapper): # ModelResponse) -> MessageDict:
message = response.choices[0].delta.model_dump(
mode="json",
exclude_none=True,
Expand All @@ -108,7 +112,7 @@ def message_from_delta(self, response: ModelResponse) -> MessageDict:
)
assert "function_call" not in message
return message

def content_from_delta(self, delta: MessageDict) -> str:
return delta.get("content")

@@ -126,4 +130,5 @@ def tool_calls_from_message(self, message: MessageDict) -> List[LiteLLMFunctionC

def response_from_deltas(self, chunks: List[Any]) -> ModelResponse:
return stream_chunk_builder(chunks)
complete_response = litellm.stream_chunk_builder(chunks=chunks, messages=messages)
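For reference, litellm's stream_chunk_builder reassembles a list of streamed chunks into a single ModelResponse; a standalone usage sketch (independent of this repository, assumes a configured API key for the chosen model):

# Sketch: accumulate streaming chunks, then rebuild one ModelResponse.
from litellm import completion, stream_chunk_builder

messages = [{"role": "user", "content": "Say hello in one word."}]
chunks = list(completion(model="gpt-4o-mini", messages=messages, stream=True))

full = stream_chunk_builder(chunks, messages=messages)
print(full.choices[0].message.content)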

3 changes: 2 additions & 1 deletion core/just_agents/protocols/protocol_factory.py
@@ -1,6 +1,7 @@
from enum import Enum
from just_agents.interfaces.protocol_adapter import IProtocolAdapter, ExecuteToolCallback


class StreamingMode(str, Enum):
openai = "openai"
echo = "echo"
@@ -27,7 +28,7 @@ def get_protocol_adapter(
execute_function_hook=execute_functions,
)
elif mode == StreamingMode.echo:
from just_agents.protocols.mock_protocol import EchoProtocolAdapter
from just_agents.protocols.echo_protocol import EchoProtocolAdapter
return EchoProtocolAdapter(
execute_function_hook=execute_functions,
)
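After the import fix, selecting the echo adapter goes through echo_protocol rather than the non-existent mock_protocol; a rough sketch of the selection branch (only the names in the hunk above come from the diff; the wrapper code around them is illustrative, not the repository's factory):

# Sketch only: StreamingMode, EchoProtocolAdapter and the
# execute_function_hook keyword are taken from the hunk above.
from typing import List, Sequence
from just_agents.protocols.protocol_factory import StreamingMode

def dummy_execute(calls: Sequence) -> List[dict]:
    return []  # placeholder tool-execution hook

mode = StreamingMode.echo
if mode == StreamingMode.echo:
    from just_agents.protocols.echo_protocol import EchoProtocolAdapter
    adapter = EchoProtocolAdapter(execute_function_hook=dummy_execute)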
10 changes: 10 additions & 0 deletions examples/just_agents/examples/web/agent.yaml
@@ -0,0 +1,10 @@
Agent:
class_qualname: just_agents.base_agent.BaseAgent
llm_options:
api_base: https://api.groq.com/openai/v1
model: groq/llama-3.3-70b-versatile
temperature: 0.0
tool_choice: auto
tools: []
system_prompt: |
You are a super-nice agent. You are always helpful, friendly and very polite. You start the conversation with "It is an honor to serve you"
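Because DEFAULT_PARENT_SECTION is now None and DEFAULT_SECTION_NAME is "Agent", this file keeps the agent entry at the top level; a sketch (not in the commit) of loading it directly with the same BaseAgent.from_yaml call that rest_api.py below uses, assuming GROQ_API_KEY is set for the actual query:

# Sketch: build the web example agent straight from the new agent.yaml.
# The from_yaml keywords mirror the call in web/rest_api.py below.
from just_agents.base_agent import BaseAgent

agent = BaseAgent.from_yaml(
    file_path="examples/just_agents/examples/web/agent.yaml",
    section_name="Agent",
    parent_section=None,
)
print(agent.query([{"role": "user", "content": "Hello!"}]))  # needs GROQ_API_KEY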
26 changes: 0 additions & 26 deletions examples/just_agents/examples/web/nice_agent.yaml

This file was deleted.

7 changes: 5 additions & 2 deletions examples/just_agents/examples/web/nice_web.py
@@ -5,6 +5,8 @@
from just_agents.web.rest_api import *
from just_agents.web.run import *

from pycomfort.logging import to_nice_stdout

load_dotenv(override=True)

web_examples_dir = Path(__file__).parent.absolute()
@@ -15,5 +17,6 @@
"""

if __name__ == "__main__":
cofig_path = web_examples_dir / "nice_agent.yaml"
run_server(config=cofig_path, agent_parent_section="agent_profiles", agent_section="SimpleWeatherAgent")
to_nice_stdout()
cofig_path = web_examples_dir / "agent.yaml"
run_server(config=cofig_path)
42 changes: 0 additions & 42 deletions pyproject.toml.bak

This file was deleted.

40 changes: 31 additions & 9 deletions web/just_agents/web/rest_api.py
@@ -10,6 +10,9 @@
import loguru
import yaml
import os
from pycomfort.logging import log_function
from eliot import log_call
import json


class AgentRestAPI(FastAPI):
@@ -73,7 +76,6 @@ def _agent_related_config(self, agent_config: Path | str, agent_section: Optiona
if agent_config is None:
# Load from environment variable or use default
agent_config = os.getenv('AGENT_CONFIG_PATH', 'agent_profiles.yaml')

self.agent = BaseAgent.from_yaml(file_path=agent_config, section_name=agent_section, parent_section=agent_parent_section)


@@ -92,6 +94,8 @@ def _routes_config(self):
self.post("/v1/chat/completions")(self.chat_completions)




def _clean_messages(self, request: dict):
for message in request["messages"]:
if message["role"] == "user":
@@ -103,34 +107,52 @@ def _clean_messages(self, request: dict):
if type(content[0].get("text", None)) is str:
message["content"] = content[0]["text"]


def _remove_system_prompt(self, request: dict):
if request["messages"][0]["role"] == "system":
request["messages"] = request["messages"][1:]

def default(self):
return f"This is default page for the {self.title}"

@log_call(action_type="chat_completions", include_result=False)
#TODO: I think this is wrong, we should send deltas when required using litellm streaming
def chat_completions(self, request: dict):
try:
loguru.logger.debug(request)
agent = self.agent
self._clean_messages(request)
self._remove_system_prompt(request)
if request["messages"]:
if request.get("stream") and str(request.get("stream")).lower() != "false":
return StreamingResponse(
self.agent.stream(request["messages"]), media_type="application/x-ndjson"
agent.stream(request["messages"]), media_type="text/event-stream"
)
resp_content = self.agent.query(request["messages"])
resp_content = agent.query(request["messages"])
else:
resp_content = "Something goes wrong, request did not contain messages!!!"
except Exception as e:
loguru.logger.error(str(e))
resp_content = str(e)

#TODO: I took it from Alex Karmazin implementation in LongevityGPTs but I THINK THIS IS TOTALLY WRONG

# Updated response format to match OpenAI API v1
return {
"id": "1",
"id": f"chatcmpl-{time.time()}", # Should be a unique identifier
"object": "chat.completion",
"created": time.time(),
"model": request["model"],
"choices": [{"message": {"role": "assistant", "content": resp_content}}],
}
"created": int(time.time()),
"model": request.get("model", "unknown"),
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": resp_content
},
"finish_reason": "stop" # Added finish_reason
}],
"usage": {
"prompt_tokens": 0, # Should implement token counting
"completion_tokens": 0, # Should implement token counting
"total_tokens": 0 # Should implement token counting
}
}
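Since the handler now returns the OpenAI chat-completions shape (id, choices with index and finish_reason, usage), any OpenAI-style client can consume it; a sketch of a non-streaming request, with the base URL as an assumption (use whatever host/port run_server binds to):

# Sketch: query the just-agents web server's OpenAI-compatible endpoint.
# http://localhost:8000 is an assumed address, not taken from the repo.
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "groq/llama-3.3-70b-versatile",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": False,
    },
    timeout=60,
)
print(resp.json()["choices"][0]["message"]["content"])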
