diff --git a/docs/docs/usage-guide/additional_configurations.md b/docs/docs/usage-guide/additional_configurations.md
index 121d77b61..a9f323da5 100644
--- a/docs/docs/usage-guide/additional_configurations.md
+++ b/docs/docs/usage-guide/additional_configurations.md
@@ -91,3 +91,24 @@ user="""
 """
 ```
 Note that the new prompt will need to generate an output compatible with the relevant [post-process function](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/tools/pr_description.py#L137).
+
+## Integrating with Logging Observability Platforms
+
+Various logging and observability tools can be used out of the box when using the default LiteLLM AI Handler. Simply configure the LiteLLM callback settings in `configuration.toml` and set environment variables according to the LiteLLM [documentation](https://docs.litellm.ai/docs/). Setting `enable_callbacks = true` in the `[litellm]` section additionally attaches the running command and PR URL as metadata to each LLM call.
+
+For example, to use [LangSmith](https://www.langchain.com/langsmith) you can add the following to your `configuration.toml` file:
+```
+[litellm]
+...
+success_callback = ["langsmith"]
+failure_callback = ["langsmith"]
+service_callback = []
+```
+
+Then set the following environment variables:
+
+```
+LANGSMITH_API_KEY=
+LANGSMITH_PROJECT=
+LANGSMITH_BASE_URL=
+```
\ No newline at end of file
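For reference, the handler changes below simply translate these settings into plain LiteLLM calls. Here is a minimal standalone sketch of the same LangSmith integration, assuming a valid `LANGSMITH_API_KEY` and an OpenAI key are exported; the `metadata` dict mirrors the shape that `add_litellm_callbacks()` builds further down, and the model name, tag values, and `<pr_url>` are illustrative placeholders:

```python
import asyncio

import litellm
from litellm import acompletion

# Register LangSmith for both successful and failed calls, as the
# [litellm] settings above do via the handler's __init__.
litellm.success_callback = ["langsmith"]
litellm.failure_callback = ["langsmith"]

async def main():
    response = await acompletion(
        model="gpt-4o",  # placeholder: any model you have credentials for
        messages=[{"role": "user", "content": "ping"}],
        # Mirrors the metadata built by add_litellm_callbacks() below.
        metadata={
            "run_name": "describe",
            "tags": ["github", "describe"],
            "extra": {"metadata": {"command": "describe", "pr_url": "<pr_url>"}},
        },
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```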
diff --git a/pr_agent/agent/pr_agent.py b/pr_agent/agent/pr_agent.py
index d0ac46caf..8bf6cff75 100644
--- a/pr_agent/agent/pr_agent.py
+++ b/pr_agent/agent/pr_agent.py
@@ -79,7 +79,7 @@ async def handle_request(self, pr_url, request, notify=None) -> bool:
         if action not in command2class:
             get_logger().debug(f"Unknown command: {action}")
             return False
-        with get_logger().contextualize(command=action):
+        with get_logger().contextualize(command=action, pr_url=pr_url):
             get_logger().info("PR-Agent request handler started", analytics=True)
             if action == "reflect_and_review":
                 get_settings().pr_reviewer.ask_and_reflect = True
diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index c8b620fe2..f577bead2 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -1,10 +1,10 @@
 import os
 import requests
-import boto3
 import litellm
 import openai
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
+
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
@@ -44,6 +44,12 @@ def __init__(self):
             litellm.use_client = True
         if get_settings().get("LITELLM.DROP_PARAMS", None):
             litellm.drop_params = get_settings().litellm.drop_params
+        if get_settings().get("LITELLM.SUCCESS_CALLBACK", None):
+            litellm.success_callback = get_settings().litellm.success_callback
+        if get_settings().get("LITELLM.FAILURE_CALLBACK", None):
+            litellm.failure_callback = get_settings().litellm.failure_callback
+        if get_settings().get("LITELLM.SERVICE_CALLBACK", None):
+            litellm.service_callback = get_settings().litellm.service_callback
         if get_settings().get("OPENAI.ORG", None):
             litellm.organization = get_settings().openai.org
         if get_settings().get("OPENAI.API_TYPE", None):
@@ -89,6 +95,60 @@ def prepare_logs(self, response, system, user, resp, finish_reason):
             response_log['main_pr_language'] = 'unknown'
         return response_log
 
+    def add_litellm_callbacks(self, kwargs) -> dict:
+        captured_extra = []
+
+        def capture_logs(message):
+            # Parse the log record and pull the contextualized extras
+            record = message.record
+            log_entry = {}
+            if record.get('extra', {}).get('command', None) is not None:
+                log_entry.update({"command": record['extra']["command"]})
+            if record.get('extra', {}).get('pr_url', None) is not None:
+                log_entry.update({"pr_url": record['extra']["pr_url"]})
+
+            # Append the log entry to the captured_extra list
+            captured_extra.append(log_entry)
+
+        # Attach a temporary Loguru sink, emit one record so it captures the current context, then detach it
+        handler_id = get_logger().add(capture_logs)
+        get_logger().debug("Capturing logs for litellm callbacks")
+        get_logger().remove(handler_id)
+
+        context = captured_extra[0] if captured_extra else {}
+
+        command = context.get("command", "unknown")
+        pr_url = context.get("pr_url", "unknown")
+        git_provider = get_settings().config.git_provider
+
+        metadata = dict()
+        callbacks = litellm.success_callback + litellm.failure_callback + litellm.service_callback
+        if "langfuse" in callbacks:
+            metadata.update({
+                "trace_name": command,
+                "tags": [git_provider, command],
+                "trace_metadata": {
+                    "command": command,
+                    "pr_url": pr_url,
+                },
+            })
+        if "langsmith" in callbacks:
+            metadata.update({
+                "run_name": command,
+                "tags": [git_provider, command],
+                "extra": {
+                    "metadata": {
+                        "command": command,
+                        "pr_url": pr_url,
+                    }
+                },
+            })
+
+        # Attach the provider-specific metadata to the completion kwargs
+        kwargs["metadata"] = metadata
+
+        return kwargs
+
     @property
     def deployment_id(self):
         """
@@ -133,6 +193,10 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
             "force_timeout": get_settings().config.ai_timeout,
             "api_base": self.api_base,
         }
+
+        if get_settings().litellm.get("enable_callbacks", False):
+            kwargs = self.add_litellm_callbacks(kwargs)
+
         seed = get_settings().config.get("seed", -1)
         if temperature > 0 and seed >= 0:
             raise ValueError(f"Seed ({seed}) is not supported with temperature ({temperature}) > 0")
diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index 67273c46d..6d34010a6 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -264,6 +264,10 @@ pr_commands = [
 [litellm]
 # use_client = false
 # drop_params = false
+enable_callbacks = false
+success_callback = []
+failure_callback = []
+service_callback = []
 
 [pr_similar_issue]
 skip_comments = false
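The context-capture trick inside `add_litellm_callbacks()` is easy to miss in the diff: it attaches a throwaway Loguru sink, logs once so the sink can read whatever `handle_request()` placed into the logging context, and detaches the sink again. A self-contained sketch of that pattern, using only documented Loguru APIs; the command name and `<pr_url>` value are illustrative placeholders:

```python
from loguru import logger

captured = []

def capture(message):
    # Every sink receives the full record, including contextualize() extras.
    extra = message.record["extra"]
    if "command" in extra:
        captured.append({"command": extra["command"], "pr_url": extra.get("pr_url")})

with logger.contextualize(command="review", pr_url="<pr_url>"):
    # Attach the sink, emit one record to snapshot the current context,
    # then detach the sink again.
    handler_id = logger.add(capture)
    logger.debug("capturing context")
    logger.remove(handler_id)

print(captured)  # [{'command': 'review', 'pr_url': '<pr_url>'}]
```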
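Since `add_litellm_callbacks()` also recognizes `langfuse` in the callback lists, a Langfuse setup should work the same way as the LangSmith example in the docs. A hedged sketch, assuming LiteLLM's standard Langfuse integration and its documented `LANGFUSE_PUBLIC_KEY` / `LANGFUSE_SECRET_KEY` environment variables (the key values are placeholders):

```python
import os

import litellm

# Placeholders: use the keys from your Langfuse project settings.
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."

# Equivalent of setting success_callback / failure_callback to
# ["langfuse"] in the [litellm] section of configuration.toml.
litellm.success_callback = ["langfuse"]
litellm.failure_callback = ["langfuse"]
```

With `enable_callbacks = true`, the `langfuse` branch of the metadata builder would then populate `trace_name`, `tags`, and `trace_metadata` for each trace.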