From 260b22b15fbd35aca651bef3cefa78f1158c10ca Mon Sep 17 00:00:00 2001
From: MarkRx
Date: Mon, 19 Aug 2024 15:07:05 -0400
Subject: [PATCH] Add and document ability to use LiteLLM Logging
 Observability tools

---
 .../usage-guide/additional_configurations.md | 21 +++++++++
 .../algo/ai_handlers/litellm_ai_handler.py   | 47 ++++++++++++++++---
 pr_agent/settings/configuration.toml         |  3 ++
 3 files changed, 65 insertions(+), 6 deletions(-)

diff --git a/docs/docs/usage-guide/additional_configurations.md b/docs/docs/usage-guide/additional_configurations.md
index 121d77b61..a9f323da5 100644
--- a/docs/docs/usage-guide/additional_configurations.md
+++ b/docs/docs/usage-guide/additional_configurations.md
@@ -91,3 +91,24 @@ user="""
 """
 ```
 Note that the new prompt will need to generate an output compatible with the relevant [post-process function](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/tools/pr_description.py#L137).
+
+## Integrating with Logging Observability Platforms
+
+Various logging observability tools can be used out of the box when using the default LiteLLM AI Handler. Simply configure the LiteLLM callback settings in `configuration.toml` and set the corresponding environment variables as described in the LiteLLM [documentation](https://docs.litellm.ai/docs/).
+
+For example, to use [LangSmith](https://www.langchain.com/langsmith) you can add the following to your `configuration.toml` file:
+```
+[litellm]
+...
+success_callback = ["langsmith"]
+failure_callback = ["langsmith"]
+service_callback = []
+```
+
+Then set the following environment variables:
+
+```
+LANGSMITH_API_KEY=
+LANGSMITH_PROJECT=
+LANGSMITH_BASE_URL=
+```
\ No newline at end of file
diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
index b4e3d0850..d364d3d83 100644
--- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py
+++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -1,10 +1,10 @@
 import os
 import requests
-import boto3
 import litellm
 import openai
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
+
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
@@ -44,6 +44,12 @@ def __init__(self):
             litellm.use_client = True
         if get_settings().get("LITELLM.DROP_PARAMS", None):
             litellm.drop_params = get_settings().litellm.drop_params
+        if get_settings().get("LITELLM.SUCCESS_CALLBACK", None):
+            litellm.success_callback = get_settings().litellm.success_callback
+        if get_settings().get("LITELLM.FAILURE_CALLBACK", None):
+            litellm.failure_callback = get_settings().litellm.failure_callback
+        if get_settings().get("LITELLM.SERVICE_CALLBACK", None):
+            litellm.service_callback = get_settings().litellm.service_callback
         if get_settings().get("OPENAI.ORG", None):
             litellm.organization = get_settings().openai.org
         if get_settings().get("OPENAI.API_TYPE", None):
@@ -90,27 +96,56 @@ def prepare_logs(self, response, system, user, resp, finish_reason):
         return response_log
 
     def add_litellm_callbacks(selfs, kwargs) -> dict:
-        pr_metadata = []
+        captured_extra = []
 
         def capture_logs(message):
             # Parsing the log message and context
             record = message.record
             log_entry = {}
-            if record.get('extra', {}).get('command', None) is not None:
+            if record.get('extra', None).get('command', None) is not None:
                 log_entry.update({"command": record['extra']["command"]})
             if record.get('extra', {}).get('pr_url', None) is not None:
                 log_entry.update({"pr_url": record['extra']["pr_url"]})
record['extra']["pr_url"]}) # Append the log entry to the captured_logs list - pr_metadata.append(log_entry) + captured_extra.append(log_entry) # Adding the custom sink to Loguru handler_id = get_logger().add(capture_logs) get_logger().debug("Capturing logs for litellm callbacks") get_logger().remove(handler_id) + context = captured_extra[0] if len(captured_extra) > 0 else None + + command = context.get("command", "unknown") + pr_url = context.get("pr_url", "unknown") + git_provider = get_settings().config.git_provider + + metadata = dict() + callbacks = litellm.success_callback + litellm.failure_callback + litellm.service_callback + if "langfuse" in callbacks: + metadata.update({ + "trace_name": command, + "tags": [git_provider, command], + "trace_metadata": { + "command": command, + "pr_url": pr_url, + }, + }) + if "langsmith" in callbacks: + metadata.update({ + "run_name": command, + "tags": [git_provider, command], + "extra": { + "metadata": { + "command": command, + "pr_url": pr_url, + } + }, + }) + # Adding the captured logs to the kwargs - kwargs["metadata"] = pr_metadata + kwargs["metadata"] = metadata return kwargs @@ -125,7 +160,7 @@ def deployment_id(self): retry=retry_if_exception_type((openai.APIError, openai.APIConnectionError, openai.APITimeoutError)), # No retry on RateLimitError stop=stop_after_attempt(OPENAI_RETRIES) ) - async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2, img_path: str = None): + async def chat_completion(self, model: str, system: str, user: str, metadata:dict = None, temperature: float = 0.2, img_path: str = None): try: resp, finish_reason = None, None deployment_id = self.deployment_id diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml index 79d30f1d4..6d34010a6 100644 --- a/pr_agent/settings/configuration.toml +++ b/pr_agent/settings/configuration.toml @@ -265,6 +265,9 @@ pr_commands = [ # use_client = false # drop_params = false enable_callbacks = false +success_callback = [] +failure_callback = [] +service_callback = [] [pr_similar_issue] skip_comments = false