API improvements
asofter committed Feb 14, 2024
1 parent 51c6ba5 commit cb9cc9f
Showing 3 changed files with 106 additions and 103 deletions.
2 changes: 1 addition & 1 deletion docs/changelog.md
@@ -14,7 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
-

### Changed
- API Documentation and Code improvements

### Removed
-
204 changes: 103 additions & 101 deletions docs/usage/api.md
@@ -114,135 +114,137 @@ The following exporters are available for tracing:

### Python

=== "Synchronous"

    ```python linenums="1"
    import os

    import requests

    LLM_GUARD_API_KEY = os.environ.get("LLM_GUARD_API_KEY")
    LLM_GUARD_BASE_URL = os.environ.get("LLM_GUARD_URL")


    class LLMGuardMaliciousPromptException(Exception):
        scores = {}

        def __init__(self, *args, **kwargs):
            super().__init__(*args)
            self.scores = kwargs.get("scores", {})

        def __str__(self):
            scanners = [scanner for scanner, score in self.scores.items() if score > 0]

            return f"LLM Guard detected a malicious prompt. Scanners triggered: {', '.join(scanners)}; scores: {self.scores}"


    class LLMGuardRequestException(Exception):
        pass


    def request_llm_guard_prompt(prompt: str):
        try:
            response = requests.post(
                url=f"{LLM_GUARD_BASE_URL}/analyze/prompt",
                json={"prompt": prompt},
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {LLM_GUARD_API_KEY}",
                },
                # requests uses `verify` (not `ssl`); disable TLS verification
                # only for local or self-signed deployments.
                verify=False,
            )

            response_json = response.json()
        except requests.RequestException as err:
            raise LLMGuardRequestException(err)

        if not response_json["is_valid"]:
            raise LLMGuardMaliciousPromptException(scores=response_json["scanners"])

        return response_json["sanitized_prompt"]


    prompt = "Write a Python function to calculate the factorial of a number."
    sanitized_prompt = request_llm_guard_prompt(prompt)
    print(sanitized_prompt)
    ```

=== "Call LLM provider and LLM Guard API in parallel"

    ```python linenums="1"
    import asyncio
    import os

    import aiohttp
    from openai import AsyncOpenAI

    LLM_GUARD_API_KEY = os.environ.get("LLM_GUARD_API_KEY")
    LLM_GUARD_BASE_URL = os.environ.get("LLM_GUARD_URL")
    openai_client = AsyncOpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
    )
    system_prompt = "You are a Python tutor."


    class LLMGuardMaliciousPromptException(Exception):
        scores = {}

        def __init__(self, *args, **kwargs):
            super().__init__(*args)
            self.scores = kwargs.get("scores", {})

        def __str__(self):
            scanners = [scanner for scanner, score in self.scores.items() if score > 0]

            return f"LLM Guard detected a malicious prompt. Scanners triggered: {', '.join(scanners)}; scores: {self.scores}"


    class LLMGuardRequestException(Exception):
        pass


    async def request_openai(prompt: str) -> str:
        chat_completion = await openai_client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": system_prompt,
                },
                {"role": "user", "content": prompt},
            ],
            model="gpt-3.5-turbo",
        )

        return chat_completion.choices[0].message.content


    async def request_llm_guard_prompt(prompt: str):
        async with aiohttp.ClientSession() as session:
            try:
                response = await session.post(
                    url=f"{LLM_GUARD_BASE_URL}/analyze/prompt",
                    json={"prompt": prompt},
                    headers={
                        "Content-Type": "application/json",
                        "Authorization": f"Bearer {LLM_GUARD_API_KEY}",
                    },
                    # Disable TLS verification only for local or self-signed deployments.
                    ssl=False,
                    raise_for_status=True,
                )

                response_json = await response.json()
            except Exception as e:
                raise LLMGuardRequestException(e)

            if not response_json["is_valid"]:
                raise LLMGuardMaliciousPromptException(scores=response_json["scanners"])


    async def generate_completion(prompt: str) -> str:
        # Run the LLM Guard scan and the OpenAI call concurrently; if the scan
        # raises, asyncio.gather propagates the exception and no completion is returned.
        result = await asyncio.gather(
            request_llm_guard_prompt(prompt),
            request_openai(prompt),
        )

        return result[1]


    prompt = "Write a Python function to calculate the factorial of a number."
    message = asyncio.run(
        generate_completion(prompt)
    )
    ```
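
Both examples rely on the JSON body returned by `/analyze/prompt` exposing `is_valid`, `scanners`, and `sanitized_prompt`. As a rough sketch (field names taken from the clients above; scanner names and score values are purely illustrative placeholders), a response for a benign prompt might look like this:

```python
# Illustrative shape of an /analyze/prompt response, inferred from the fields
# the clients above read; scanner names and scores are placeholder values.
example_response = {
    "sanitized_prompt": "Write a Python function to calculate the factorial of a number.",
    "is_valid": True,
    "scanners": {"PromptInjection": 0.0, "Secrets": 0.0},
}

# For a flagged prompt, is_valid would be False and the offending scanners would
# carry scores above 0, which the clients above surface as
# LLMGuardMaliciousPromptException.
```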

## Troubleshooting

3 changes: 2 additions & 1 deletion mkdocs.yml
@@ -59,7 +59,8 @@ theme:

markdown_extensions:
  - pymdownx.highlight
  - pymdownx.tabbed:
      alternate_style: true
  - pymdownx.tasklist
  - pymdownx.inlinehilite
  - pymdownx.snippets
