Skip to content

Commit

Permalink
feat(config): add import of args module in __init__.py
Browse files Browse the repository at this point in the history
refactor(config): streamline argparse setup in args.py
feat(config): add new command line argument for file input in args.py
refactor(config): update constants with new system content description
feat(main): integrate argument parsing and dynamic user content handling
feat(main): add new payload creation function for POST requests
feat(models): add new LMStudioChatResponse data model
feat(utils): implement utility function to read file content
  • Loading branch information
myinusa committed Aug 31, 2024
1 parent c338b99 commit 5428fd4
Show file tree
Hide file tree
Showing 8 changed files with 168 additions and 49 deletions.
1 change: 1 addition & 0 deletions filename_suggestion_ai/config/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from .args import *
from .constants import *
from .enviornment_setup import *
from .setup_logging import *
28 changes: 11 additions & 17 deletions filename_suggestion_ai/config/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,19 +4,12 @@

def get_parsed_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
# description=DESC, # Program description
# formatter_class=RichHelpFormatterPlus, # Disable line wrapping
allow_abbrev=False, # Disable abbreviations
add_help=False, # Disable default help
allow_abbrev=False,
add_help=False,
)

# parser.add_argument("--playlist", action="store_true", help="playlist process.")
# parser.add_argument("--video_items", action="store_true", help="List of video items to process.")

g_misc = parser.add_argument_group("Miscellaneous Options")
# Help
g_misc.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
# Verbose
g_misc.add_argument(
"-v",
"--verbose",
Expand All @@ -25,7 +18,6 @@ def get_parsed_args() -> argparse.Namespace:
default=False,
help="Show log messages on screen. Default is False.",
)
# Debug
g_misc.add_argument(
"-d",
"--debug",
Expand All @@ -34,13 +26,15 @@ def get_parsed_args() -> argparse.Namespace:
default=False,
help="Activate debug logs. Default is False.",
)
# New argument for file input
g_misc.add_argument(
"-f",
"--file",
dest="file",
required=True,
help="Path to the file whose content will be sent as user content in the payload.",
)

args = parser.parse_args()

# Conditional check to ensure at least one argument is provided
# if not args.playlist and not args.video_items:
# print("Error: At least one of --playlist or --video_items must be provided.")
# parser.print_help()
# sys.exit(1)

return args
return args
4 changes: 2 additions & 2 deletions filename_suggestion_ai/config/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,5 @@
"Content-Type": "application/json",
}
MODEL = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF"
SYSTEM_CONTENT = ""
USER_CONTENT = "What is the meaning of life?"
SYSTEM_CONTENT = "From the content of the file, generate a filename. Use snake case, max 25 characters, no file extension or special characters. Only key elements, one word if possible in noun-verb format. Avoid using names that are too general or too wordy. Respond ONLY with filename."
USER_CONTENT = "What is the meaning of life?"
102 changes: 72 additions & 30 deletions filename_suggestion_ai/main.py
Original file line number Diff line number Diff line change
@@ -1,34 +1,74 @@
from __future__ import annotations

import logging
from pathlib import Path
from typing import Optional

import requests

from filename_suggestion_ai.config import HEADERS, MODEL, SYSTEM_CONTENT, URL, USER_CONTENT, initialize_application

PAYLOAD = {
"model": MODEL,
"messages": [
{
"role": "system",
"content": SYSTEM_CONTENT,
},
{
"role": "user",
"content": USER_CONTENT,
},
],
"temperature": 0.8,
"max_tokens": -1,
"seed": -1,
"stream": False,
}
from filename_suggestion_ai.config import (
HEADERS,
MODEL,
SYSTEM_CONTENT,
URL,
get_parsed_args,
initialize_application,
)
from filename_suggestion_ai.models import LMStudioChatResponse
from filename_suggestion_ai.utils import read_file_content


def create_payload(user_content: str) -> dict:
    """Build the JSON body for the chat-completion POST request.

    Args:
        user_content: Text to send as the user message (typically the
            escaped file content).

    Returns:
        dict: Request body containing the model id, the system/user
        messages, and the sampling options expected by the endpoint.
    """
    messages = [
        {"role": "system", "content": SYSTEM_CONTENT},
        {"role": "user", "content": user_content},
    ]
    return {
        "model": MODEL,
        "messages": messages,
        "temperature": 0.8,
        "max_tokens": -1,
        "seed": -1,
        "stream": False,
    }


def main() -> None:
    """CLI entry point: suggest a filename for the file given via --file.

    Parses command-line arguments, initializes the application, reads the
    input file, sends its content to the LM Studio chat endpoint, and logs
    the model's answer. Exits early (with an error log) if the file cannot
    be read or the request fails.
    """
    args = get_parsed_args()
    initialize_application()
    # NOTE(review): removed stale `send_post_request(URL, HEADERS, PAYLOAD)`
    # call left over from the old module-level PAYLOAD constant, which no
    # longer exists — it would raise NameError and duplicate the request.

    # Read the file whose content becomes the user message.
    user_content = read_file_content(Path(args.file))
    if user_content is None:
        logging.error("Failed to read file content. Exiting.")
        return

    # Build the request body and call the chat-completion endpoint.
    payload = create_payload(user_content)
    response = send_post_request(URL, HEADERS, payload)
    if response is None:
        logging.error("Failed to receive a valid response. Exiting.")
        return

    # First choice holds the suggestion; nested values are plain dicts as
    # parsed from JSON (the dataclass does not convert them).
    answer = response["choices"][0]["message"]["content"]

    logging.info("Answer: %s", answer)


def send_post_request(url: str, headers: dict[str, str], payload: dict) -> None:
def send_post_request(url: str, headers: dict[str, str], payload: dict) -> LMStudioChatResponse | None:
"""
Send a POST request to the specified URL with the given headers and payload.
Expand All @@ -38,20 +78,22 @@ def send_post_request(url: str, headers: dict[str, str], payload: dict) -> None:
payload (Dict): The JSON payload for the POST request.
Returns:
None
Optional[ChatCompletionResponse]: The parsed chat completion response or None if an error occurs.
"""
try:
logging.info("Sending POST request to %s", url)
logging.info(f"Sending POST request to {url}")
response = requests.post(url, json=payload, headers=headers, timeout=60)
response.raise_for_status() # Raises an HTTPError for bad responses
logging.info("Successfully sent POST request. Status Code: %s", response.status_code)
logging.info("Response Content: %s", response.text)
except requests.exceptions.HTTPError as e:
logging.error("HTTP error occurred: %s", e)
except requests.exceptions.RequestException as e:
logging.error("Error during requests to %s: %s", url, e)
except Exception as e:
logging.error("An unexpected error occurred: %s", e)
logging.debug("Complete response")
return LMStudioChatResponse(**response.json())
except requests.exceptions.HTTPError:
logging.exception("HTTP error occurred")
except requests.exceptions.RequestException:
logging.exception("Error during requests to %s", url)
except Exception:
logging.exception("An unexpected error occurred")
return None


if __name__ == "__main__":
Expand Down
1 change: 1 addition & 0 deletions filename_suggestion_ai/models/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .lm_studio_chat_completion import *
54 changes: 54 additions & 0 deletions filename_suggestion_ai/models/lm_studio_chat_completion.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Optional, Union


@dataclass
class Message:
    """A single chat message (role + text) in the OpenAI-compatible schema."""

    # role: presumably "system", "user", or "assistant" — TODO confirm
    role: str
    content: str


@dataclass
class Choice:
    """One completion choice returned by the chat endpoint."""

    index: int
    message: Message
    finish_reason: str
    # Token log-probabilities; None when not requested by the caller.
    logprobs: dict | None = None


@dataclass
class Usage:
    """Token accounting for a completion request."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


@dataclass
class LMStudioChatResponse:
    """
    LM Studio chat response.

    Mirrors the OpenAI-compatible chat-completion payload.
    NOTE: when built via ``LMStudioChatResponse(**response.json())`` the
    nested values are NOT converted to their dataclass types — ``choices``
    holds plain dicts and ``usage`` a plain dict, exactly as parsed from
    JSON. The annotations below describe the intended schema only.

    See https://platform.openai.com/docs/api-reference/chat/create
    See https://lmstudio.ai/docs/local-server
    """

    id: str
    object: str
    created: int
    model: str
    choices: list[Choice]
    usage: Usage
    system_fingerprint: str

    def __getitem__(self, key: str | int) -> object:
        """Allow dict/list-style access on the response.

        ``resp["model"]`` reads the attribute of that name;
        ``resp[0]`` is shorthand for ``resp.choices[0]`` (previously the
        int branch was commented out, making the error message below a lie
        — int keys raised TypeError despite it claiming they were valid).

        Raises:
            TypeError: If *key* is neither an int nor a str.
        """
        if isinstance(key, int):
            # Integer keys index into the choices list.
            return self.choices[key]
        if isinstance(key, str):
            # String keys map to attribute access.
            return getattr(self, key)
        msg = "Key must be either an integer or a string"
        raise TypeError(msg)
1 change: 1 addition & 0 deletions filename_suggestion_ai/utils/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .file_util import *
26 changes: 26 additions & 0 deletions filename_suggestion_ai/utils/file_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
from __future__ import annotations

import logging
from pathlib import Path


def read_file_content(file_path: Path) -> str | None:
"""
Reads the content of a file and returns it as a string.
Args:
file_path (Path): The path to the file to be read.
Returns:
Optional[str]: The content of the file as a string, or None if an error occurs.
"""
try:
with file_path.open("r") as file:
return file.read().replace("\n", "\\n")
except FileNotFoundError:
logging.exception("File not found: %s", file_path)
except OSError as e:
logging.exception(f"I/O error({e.errno}): {e.strerror}")
except Exception:
logging.exception("An unexpected error occurred while reading the file")
return None

0 comments on commit 5428fd4

Please sign in to comment.