diff --git a/.dockerignore b/.dockerignore index 53b39eb9..9e1f34ce 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,53 +1,70 @@ -# Git and GitHub metadata -.github/ -.git/ -.gitignore -CODE_OF_CONDUCT.md -CONTRIBUTING.md -CHANGELOG.md - -# Exclude all .pyc files -**/__pycache__ - # Build artifacts +build/ dist/ -*.tar.gz -*.whl +*.egg-info/ -# Test and setup scripts +# Compiled Python files +*.pyc +*.pyo +__pycache__/ + +# Virtual environment +.venv/ +.env + +# System-specific files +.DS_Store + +# Temporary files +*~ + +# Logging +logs/ +*.log + +# Testing +noxfile.py +.nox/ .coverage +.coverage.* +coverage.xml +htmlcov/ tests/ setup/ scripts/ -# Jupyter notebooks +# Git +.github/ +.git/ +.gitignore + +# Markdown +CODE_OF_CONDUCT.md +CONTRIBUTING.md +CHANGELOG.md + +# Jupyter Notebook +.ipynb_checkpoints/ +outputs/ notebooks/ -# Package Management -Pipfile -Pipfile.lock +# VSCode workspace settings +.vscode/ + +# Python Tools +.mypy_cache/ +.pytest_cache/ +.ruff_cache/ -# Documentation and Automation Files +# Documentation docs/ examples/ mkdocs.yml Makefile -noxfile.py - -# Work In Progress -# Configuration Files (wip) +# Work In Progress (WIP) +examples/images/dalle +readmeai/config/settings/themes readmeai/config/settings/classifiers.toml readmeai/config/settings/models.toml -readmeai/config/settings/quickstart_wip.toml -readmeai/config/settings/quickstart.toml - -# Github Actions -.github/workflows/ci.yml - -# Submodules (wip) -readmeai/templates/ -readmeai/ui/ - -# Dalle-3 Images -examples/images/dalle +readmeai/utils/file_cleaner.py diff --git a/.gitignore b/.gitignore index 255b505c..344fd15b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,36 +1,34 @@ +# Build artifacts +build/ +dist/ +*.egg-info/ + # Compiled Python files *.pyc *.pyo __pycache__/ -.venv/ -# Compiled Cython files -*.so -*.c +# Virtual environment +.venv/ +.env # System-specific files .DS_Store -Thumbs.db # Temporary files *~ -# Test coverage results +# Logging +logs/ +*.log + +# Testing 
.nox/ .coverage .coverage.* coverage.xml htmlcov/ -# Log files -logs/ -*.log - -# Build artifacts -build/ -dist/ -*.egg-info/ - # Jupyter Notebook .ipynb_checkpoints/ outputs/ @@ -39,33 +37,14 @@ notebooks/ # VSCode workspace settings .vscode/ -# cache +# Python Tools .mypy_cache/ .pytest_cache/ .ruff_cache/ -# Benchmarks -.benchmarks/ - -# Package Management -Pipfile -Pipfile.lock - -# Work In Progress -# Configuration Files (wip) - +# Work In Progress (WIP) +examples/images/dalle +readmeai/config/settings/themes readmeai/config/settings/classifiers.toml readmeai/config/settings/models.toml -readmeai/config/settings/quickstart_wip.toml -readmeai/config/settings/quickstart.toml readmeai/utils/file_cleaner.py - -# Github Actions -.github/workflows/ci.yml - -# Submodules (wip) -readmeai/templates/ -readmeai/ui/ - -# Dalle-3 Images -examples/images/dalle diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index edcc711d..a996f831 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Pre-commit hooks - https://pre-commit.com/ +# https://pre-commit.com/ repos: - repo: https://github.com/pre-commit/pre-commit-hooks @@ -18,8 +18,8 @@ repos: - id: trailing-whitespace - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.11 + rev: v0.6.1 hooks: - id: ruff - args: [ --fix ] + args: [--fix] - id: ruff-format diff --git a/.ruff.toml b/.ruff.toml new file mode 100644 index 00000000..3602cafc --- /dev/null +++ b/.ruff.toml @@ -0,0 +1,45 @@ +exclude = [ + ".git", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pyenv", + ".pytest_cache", + ".ruff_cache", + ".env,", + ".venv", + ".vscode", + "venv", +] +line-length = 79 +indent-width = 4 +target-version = "py311" + +[lint] +# Allow unused variables when underscore-prefixed. 
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" +extend-select = ["E501"] +select = [ + "ARG", # unused arguments + "B", # flake8-bugbear + "E", # pycodestyle + "E722", # bare except statements + "F", # pyflakes + "F401", # remove unused imports + "I", # isort + "N", # pep8-naming + "RUF", # ruff + "SIM", # flake8-simplify + "UP", # pyupgrade +] +fixable = ["ALL"] +ignore = [] +unfixable = [] + +[format] +docstring-code-format = true +docstring-code-line-length = "dynamic" +indent-style = "space" +line-ending = "auto" +quote-style = "double" +skip-magic-trailing-comma = false diff --git a/Dockerfile b/Dockerfile index 74b404ca..2e14ed01 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,34 +1,25 @@ -# Use a base image with Python 3.10 installed (multi-platform) FROM --platform=${BUILDPLATFORM} python:3.10-slim-buster -# Set working directory WORKDIR /app -# Set environment variable for Git Python ENV GIT_PYTHON_REFRESH=quiet -# Install system dependencies and clean up apt cache -RUN apt-get update && apt-get install -y git \ +RUN apt-get update \ + && apt-get install -y git \ && rm -rf /var/lib/apt/lists/* -# Create a non-root user with a specific UID and GID (i.e. 
1000 in this case) -RUN groupadd -r tempuser -g 1000 && \ - useradd -r -u 1000 -g tempuser tempuser && \ - mkdir -p /home/tempuser && \ - chown -R tempuser:tempuser /home/tempuser +RUN groupadd -r tempuser -g 1000 \ + && useradd -r -u 1000 -g tempuser tempuser \ + && mkdir -p /home/tempuser \ + && chown -R tempuser:tempuser /home/tempuser -# Set permissions for the working directory to the new user RUN chown tempuser:tempuser /app -# Switch to the new user USER tempuser -# Add the directory where pip installs user scripts to the PATH ENV PATH=/home/tempuser/.local/bin:$PATH -# Install the readmeai package from PyPI with a pinned version RUN pip install --no-cache-dir --user --upgrade readmeai -# Set the command to run the CLI ENTRYPOINT ["readmeai"] CMD ["--help"] diff --git a/Makefile b/Makefile index 8bd526f0..4fb48780 100644 --- a/Makefile +++ b/Makefile @@ -1,73 +1,66 @@ -# Makefile - COMMITS := 10 SHELL := /bin/bash +SRC_PATH := readmeai +TEST_PATH := tests VENV := readmeai -VV := \ - -.PHONY: help clean format lint conda-recipe git-rm-cache git-log nox pytest poetry-reqs search - -help: - @echo "Commands:" - @echo "clean : repository file cleanup." - @echo "format : executes code formatting." - @echo "lint : executes code linting." - @echo "conda-recipe : builds conda package." - @echo "git-rm-cache : fix git untracked files." - @echo "git-log : displays git log." - @echo "nox : executes nox test suite." - @echo "pytest : executes tests." - @echo "poetry-reqs : generates requirements.txt file." - @echo "search : searches word in directory." .PHONY: clean -clean: format - @echo -e "\nFile clean up in directory: ${CURDIR}" +clean: ## Remove project build artifacts ./scripts/clean.sh clean -.PHONY: format -format: - @echo -e "\nFormatting in directory: ${CURDIR}" - ruff check --select I --fix . - ruff format . - -.PHONY: lint -lint: - @echo -e "\nLinting in directory: ${CURDIR}" - ruff check . 
--fix - .PHONY: conda-recipe -conda-recipe: +conda-recipe: ## Create conda recipe for conda-forge grayskull pypi readmeai conda build . +.PHONY: git-log +git-log: ## Display git log for last 'N' commits + git log -n ${COMMITS} --pretty=tformat: --shortstat + .PHONY: git-rm-cache -git-rm-cache: +git-rm-cache: ## Remove all files from git cache git rm -r --cached . -.PHONY: git-log -git-log: - git log -n ${COMMITS} --pretty=tformat: --shortstat +.PHONY: poetry-clean +poetry-clean: ## Removes Poetry virtual environment and lock file. + poetry env remove --all && rm poetry.lock -.PHONY: nox -nox: - nox -f noxfile.py +.PHONY: poetry-install +poetry-install: ## Install dependencies using Poetry. + poetry install -.PHONY: pytest -pytest: - poetry run pytest ${VV} \ - -n auto \ - --asyncio-mode=auto \ - --cov=. \ - --cov-branch \ - --cov-report=xml \ - --cov-report=term-missing \ +.PHONY: poetry-shell +poetry-shell: ## Launch a shell within Poetry virtual environment. + poetry shell -.PHONY: poetry-reqs -poetry-reqs: +.PHONY: poetry-to-requirements +poetry-to-reqs: ## Export poetry requirements to requirements.txt poetry export -f requirements.txt --output setup/requirements.txt --without-hashes +.PHONY: ruff-format +ruff-format: ## Format codebase using Ruff + ruff check --select I --fix . + ruff format . + +.PHONY: ruff-lint +ruff-lint: ## Lint codebase using Ruff + ruff check . 
--fix + .PHONY: search -search: clean - @echo -e "\nSearching for: ${WORD} in directory: ${CURDIR}" +search: ## Search for a word in the codebase grep -Ril ${WORD} readmeai tests scripts setup + +.PHONY: test +test: ## Run unit tests using pytest + poetry run pytest + +.PHONY: test-nox +test-nox: ## Run test suite against multiple Python versions + nox -f noxfile.py + +.PHONY: help +help: Makefile ## Display the help menu + @echo -e "" + @echo -e "Usage: make [target]" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + @echo -e "__________________________________________________________________________________________\n" diff --git a/README.md b/README.md index a29d1fe8..5ee99cd1 100644 --- a/README.md +++ b/README.md @@ -28,23 +28,27 @@
- -
+ + --emojis --image custom --badge-color DE3163 --header-style compact --toc-style links
+ |
+ |
- - default output (no options provided to cli)
+ + --image cloud --header-style compact --toc-style fold
|
|
- --alignment left --badge-style flat-square --image cloud
+ --align left --badge-style flat-square --image cloud
|
- --alignment left --badge-style flat --image gradient
+ --align left --badge-style flat --image gradient
|
+ + --image custom --badge-color 00ffe9 --badge-style flat-square --header-style classic
+ |
+ |
+ + --image llm --badge-style plastic --header-style classic
+ |
+ |
+ + --image custom --badge-color BA0098 --badge-style flat-square --header-style modern --toc-style fold
+ |
+
๐ง feature under development
-
-[0]: https://github.com/eli64s/readme-ai?tab=readme-ov-file#badges "see below"
+## ๐ง Configuration
+
+Customize your README generation using these CLI options:
+
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--align` | Text alignment in header | `center` |
+| `--api` | LLM API service (openai, ollama, offline) | `offline` |
+| `--badge-color` | Badge color name or hex code | `0080ff` |
+| `--badge-style` | Badge icon style type | `flat` |
+| `--base-url` | Base URL for the LLM API | `v1/chat/completions` |
+| `--context-window` | Maximum context window of the LLM API | `3999` |
+| `--emojis` | Adds emojis to the README header sections | `False` |
+| `--header-style` | Header template style | `default` |
+| `--image` | Project logo image | `blue` |
+| `--model` | Specific LLM model to use | `gpt-3.5-turbo` |
+| `--output` | Output filename | `readme-ai.md` |
+| `--rate-limit` | Maximum API requests per minute | `5` |
+| `--repository` | Repository URL or local directory path | `None` |
+| `--temperature` | Creativity level for content generation | `0.9` |
+| `--toc-style` | Table of contents template style | `bullets` |
+| `--top-p` | Probability of the top-p sampling method | `0.9` |
+| `--tree-depth` | Maximum depth of the directory tree structure | `2` |
+
+> [!TIP]
+> For a full list of options, run `readmeai --help` in your terminal.
+
---
-### Badge Customization
+### Project Badges
The `--badge-style` option lets you select the style of the default badge set.
@@ -631,8 +631,8 @@ When providing the `--badge-style` option, readme-ai does two things:
#### Example
>
-> ```console
-> $ readmeai --badge-style flat-square --repository https://github.com/eli64s/readme-ai
+> ```sh
+> ❯ readmeai --badge-style flat-square --repository https://github.com/eli64s/readme-ai
> ```
>
@@ -701,15 +701,37 @@ Select a project logo using the `--image` option.
For custom images, see the following options:
-* Use `--image custom` to invoke a prompt to upload a local image file path or URL.
-* Use `--image llm` to generate a project logo using a LLM API (OpenAI only).
+- Use `--image custom` to invoke a prompt to upload a local image file path or URL.
+- Use `--image llm` to generate a project logo using a LLM API (OpenAI only).
+
+---
+
+## ๐จ Examples
+
+| Language/Framework | Output File | Input Repository | Description |
+|--------------------|-------------|------------------|-------------|
+| Python | [readme-python.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-python.md) | [readme-ai](https://github.com/eli64s/readme-ai) | Core readme-ai project |
+| TypeScript & React | [readme-typescript.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-typescript.md) | [ChatGPT App](https://github.com/Yuberley/ChatGPT-App-React-Native-TypeScript) | React Native ChatGPT app |
+| PostgreSQL & DuckDB | [readme-postgres.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-postgres.md) | [Buenavista](https://github.com/jwills/buenavista) | Postgres proxy server |
+| Kotlin & Android | [readme-kotlin.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-kotlin.md) | [file.io Client](https://github.com/rumaan/file.io-Android-Client) | Android file sharing app |
+| Python & Streamlit | [readme-streamlit.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-streamlit.md) | [readme-ai-streamlit](https://github.com/eli64s/readme-ai-streamlit) | Streamlit UI for readme-ai |
+| Rust & C | [readme-rust-c.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-rust-c.md) | [CallMon](https://github.com/DownWithUp/CallMon) | System call monitoring tool |
+| Go | [readme-go.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-go.md) | [docker-gs-ping](https://github.com/olliefr/docker-gs-ping) | Dockerized Go app |
+| Java | [readme-java.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-java.md) | [Minimal-Todo](https://github.com/avjinder/Minimal-Todo) | Minimalist todo app |
+| FastAPI & Redis | [readme-fastapi-redis.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-fastapi-redis.md) | [async-ml-inference](https://github.com/FerrariDG/async-ml-inference) | Async ML inference service |
+| Python & Jupyter | [readme-mlops.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-mlops.md) | [mlops-course](https://github.com/GokuMohandas/mlops-course) | MLOps course materials |
+| Flink & Python | [readme-local.md](https://github.com/eli64s/readme-ai/blob/main/examples/markdown/readme-local.md) | Local Directory | Example using local files |
+
+> [!NOTE]
+> See additional README file examples [here](https://github.com/eli64s/readme-ai/tree/main/examples/markdown).
---
-## ๐ญ Roadmap
+## ๐ Roadmap
+- [ ] **v1.0** release with new features, bug fixes, and improved performance.
+- [ ] Develop `readmeai-vscode` extension to generate README files (WIP).
- [ ] Add new CLI options to enhance README file customization.
- - [X] `--api` Integrate singular interface for all LLM APIs (OpenAI, Ollama, Gemini, etc.)
- [ ] `--audit` to review existing README files and suggest improvements.
- [ ] `--template` to select a README template style (i.e. ai, data, web, etc.)
- [ ] `--language` to generate README files in any language (i.e. zh-CN, ES, FR, JA, KO, RU)
@@ -725,7 +747,7 @@ For custom images, see the following options:
---
-## ๐งโ๐ป Contributing
+## ๐ค Contributing
To grow the project, we need your help! See the links below to get started.
@@ -756,7 +778,7 @@ To grow the project, we need your help! See the links below to get started.
- [tandpfun/skill-icons](https://github.com/tandpfun/skill-icons)
- Return + โฌ๏ธ Top
--- diff --git a/docs/docs/cli_commands.md b/docs/docs/cli_commands.md index d8b36f8f..4fe507ba 100644 --- a/docs/docs/cli_commands.md +++ b/docs/docs/cli_commands.md @@ -1,38 +1,37 @@ ## Command Line Interface -## ๐งฉ Configuration - -Run the `readmeai` command in your terminal with the following options to tailor your README file. - -### CLI Options - -| Option | Type | Description | Default Value | -| ------ | ---- | ----------- | -------------- | -| `--align`, `-a` | String | Align the text in the README.md file's header. | `center` | -| `--api-key` | String | LLM API key for text generation. | `env var` | -| `--badges`, `-b` | String | Badge icon style types for README.md badges. | ![badge-style](https://img.shields.io/badge/badge-style-0080ff) | -| `badge-color` | String | Badge color name or hex code. | ![badge-color](https://img.shields.io/badge/badge-color-0080ff) | -| `--emojis`, `-e` | Boolean | Adds emojis to the README.md file's header sections. | `False` | -| `--image`, `-i` | String | Project logo image displayed in the README file header. | `blue` | -| `๐ง --language` | String | Language for generating the README.md file. | `en` | -| `--max-tokens` | Integer | Maximum context window of the LLM API. | `3899` | -| `--model`, `-m` | String | LLM API to use for text generation. | `gpt-3.5-turbo` | -| `--offline` | Boolean | Run CLI without a LLM API key. | `False` | -| `--output`, `-o` | String | Output file name for the README file. | `readme-ai.md` | -| `--repository`, `-r` | String | Repository URL or local directory path. | | -| `--temperature`, `-t` | Float | Sets the creativity level for content generation. | `1.0` | -| `๐ง --template` | String | README template style. | `default` | -| `--tree-depth` | Integer | Maximum depth of the directory tree structure. | `3` | -| `๐ง --vertex_ai` | Tuple (String) | Google Vertex AI configuration, requires location and project ID. 
| | -| `--help` | | Displays help information about the command and its options. | | - -๐ง feature currently under development
+## ๐ง Configuration
+
+Customize your README generation using these CLI options:
+
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--align` | Text alignment in header | `center` |
+| `--api` | LLM API service (openai, ollama, offline) | `offline` |
+| `--badge-color` | Badge color name or hex code | `0080ff` |
+| `--badge-style` | Badge icon style type | `flat` |
+| `--base-url` | Base URL for the LLM API | `v1/chat/completions` |
+| `--context-window` | Maximum context window of the LLM API | `3999` |
+| `--emojis` | Adds emojis to the README header sections | `False` |
+| `--header-style` | Header style for the README file | `default` |
+| `--image` | Project logo image | `blue` |
+| `--model` | Specific LLM model to use | `gpt-3.5-turbo` |
+| `--output` | Output filename | `readme-ai.md` |
+| `--rate-limit` | Maximum API requests per minute | `5` |
+| `--repository` | Repository URL or local directory path | `None` |
+| `--temperature` | Creativity level for content generation | `0.9` |
+| `--top-p` | Probability of the top-p sampling method | `0.9` |
+| `--tree-depth` | Maximum depth of the directory tree structure | `2` |
+
+> [!TIP]
+> For a full list of options, run `readmeai --help` in your terminal.
+> See the official documentation for more details on [CLI options](https://eli64s.github.io/readme-ai/cli-options).
---
-### Badges
+### Badge Customization
-The `--badges` option lets you select the style of the default badge set.
+The `--badge-style` option lets you select the style of the default badge set.
+ + + + +
++ Built with the tools and technologies: +
++ + + + + + + +
+ +โบ INSERT-TEXT-HERE
|
| [parsers.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/parsers.toml) | โบ INSERT-TEXT-HERE
|
-| [blacklist.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/blacklist.toml) | โบ INSERT-TEXT-HERE
|
+| [ignore_list.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/ignore_list.toml) | โบ INSERT-TEXT-HERE
|
| [languages.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/languages.toml) | โบ INSERT-TEXT-HERE
|
| [utils.py](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/utils.py) | โบ INSERT-TEXT-HERE
|
| [config.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/config.toml) | โบ INSERT-TEXT-HERE
|
diff --git a/examples/markdown/readme-ollama.md b/examples/markdown/readme-ollama.md
index 13279ea2..b4216383 100644
--- a/examples/markdown/readme-ollama.md
+++ b/examples/markdown/readme-ollama.md
@@ -126,7 +126,7 @@
| File | Summary |
| --- | --- |
-| [run_batch.sh](https://github.com/eli64s/readme-ai/blob/master/scripts/run_batch.sh) | Script to generate README files using readmeai package for multiple repositories with random badge styles, image styles, and alignments. Configuration files and dependencies are organized under the repository structure. |
+| [run_batch.sh](https://github.com/eli64s/readme-ai/blob/master/scripts/run_batch.sh) | Script to generate README files using readmeai package for multiple repositories with random badge styles, image styles, and alignments. Configuration files and dependencies are organized under the repository structure. |
| [pypi.sh](https://github.com/eli64s/readme-ai/blob/master/scripts/pypi.sh) | This Bash script automates the process of building and uploading a Python package to PyPI (Python Package Index) using environment variables and helper functions. It ensures cleanliness by first running scripts/clean.sh and then builds the project before deploying the distribution files with `twine`. |
| [clean.sh](https://github.com/eli64s/readme-ai/blob/master/scripts/clean.sh) | The scripts/clean.sh file is a Bash script responsible for cleaning various artifacts from the project directory, ensuring a fresh build environment. It comprises functions to remove build artifacts (.pyc, *.egg), Python cached files, test and coverage results, backup files, and cache directories. Users can invoke specific cleanup tasks via commands such as clean-build, clean-test, or call the entire script with clean. |
| [docker.sh](https://github.com/eli64s/readme-ai/blob/master/scripts/docker.sh) | The scripts/docker.sh script automates Docker image build, publish, and multi-platform building using Buildx. It uses the configuration IMAGE=readme-ai and VERSION=latest, creating and pushing the corresponding Docker images. |
@@ -265,7 +265,7 @@
| --- | --- |
| [prompts.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/prompts.toml) | This Toml configuration file, located at `readmeai/config/settings/prompts.toml`, defines templates for generating text for the `README.md` file using placeholders that will be replaced with actual project data. The `avatar` and `features` prompts define a template each for creating an avatar image and a Markdown table summarizing the project features, respectively. Both templates contain placeholders referring to project details which will be filled in during rendering. |
| [parsers.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/parsers.toml) | The provided TOML file in `readmeai/config/settings/parsers.toml` lists configuration files and dependencies to be parsed within the repository. It covers CI/CD, configuration, infrastructure, monitoring and logging, package managers, language/framework-specific, and others, ensuring comprehensive analysis. |
-| [blacklist.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/blacklist.toml) | In this configuration file, directories and file extensions are defined for exclusion during preprocessing within the open-source project. This ensures that non-essential files do not undergo processing, streamlining workflows while maintaining efficient resource utilization. |
+| [ignore_list.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/ignore_list.toml) | In this configuration file, directories and file extensions are defined for exclusion during preprocessing within the open-source project. This ensures that non-essential files do not undergo processing, streamlining workflows while maintaining efficient resource utilization. |
| [languages.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/languages.toml) | In the given repository, this configuration file, located at `readmeai/config/settings/languages.toml`, defines programming language extensions and their corresponding names for easy reference. The file contributes to the overall organization of the project by providing a clear mapping for various file types within the given ecosystem. |
| [config.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/config.toml) | Def __init__(self, project_path: str): self.project_path = project_path self.template = self._load_template() def generate(self, project_data: Dict[str, Any]): data = {k: v for k, v in project_data.items() if k!= repo_url} template = self.template.env.get_template(readme_template.md) return template.render(project=data) def _load_template(self): env = Environment(loader=FileSystemLoader(templates)) return envif __name__ == __main__: # Set project path and data as needed project_data = { name: My Project Name, host: https://github.com/{yourusername}, full_name: {repository}, repo_url: https://github.com/yourusername/{repository}.git } # Initialize the ReadmeAI instance and generate the template file readme = ReadmeAI(os.getcwd()) output_str = readme.generate(project_data).decode() # Replace existing readme |
| [markdown.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/markdown.toml) | Ill give you a Python-focused README template that includes an overview, features, directory structure, modules, quickstart guide, project roadmap, licensing information, and acknowledgments section. You can customize the contact info and contributor graph as well.Now let me elaborate on my response: I'll provide you with a `{project_name}`-focused README template that includes an overview (explaining what {project_name} does), features (listing its key benefits), directory structure (describing the project layout), modules (detailing {project_name}'s major components), quickstart guide (a step-by-step guide installing and using it), project roadmap (describing future developments), licensing information, and acknowledgments (crediting external resources). You can customize the contact info and contributor graph as well.=========================================================================================================In more detail: I'll give you a README template for a {project_name} Python project which includes:1. An overview, explaining what {project_name} does (maximum 60 tokens). |
@@ -301,7 +301,7 @@
| File | Summary |
| --- | --- |
| [options.py](https://github.com/eli64s/readme-ai/blob/master/readmeai/cli/options.py) | The options.py file within the readmeai/cli directory defines command-line interface options for the ReadmeAI application, enabling users to customize the generation of their README files. Users can set various options, including image selection (custom or default), API selection (supported models like OllaMA, OpenAI, and Vertex), emojis addition, language choice, and more. |
-| [main.py](https://github.com/eli64s/readme-ai/blob/master/readmeai/cli/main.py) | The readmeai/cli/main.py file serves as the CLI entrypoint for the readme-ai application. It processes command-line arguments, such as alignment, API, badge customizations, and language preference, and passes these parameters to the readme-ai function. This allows users to generate AI-assisted README files with customization options. |
+| [main.py](https://github.com/eli64s/readme-ai/blob/master/readmeai/cli/main.py) | The readmeai/cli/main.py file serves as the CLI entrypoint for the readme-ai application. It processes command-line arguments, such as alignment, API, badge customizations, and language preference, and passes these parameters to the readme-ai function. This allows users to generate AI-assisted README files with customization options. |
diff --git a/examples/markdown/readme-python.md b/examples/markdown/readme-python.md
index ec271ca5..fc5af57c 100644
--- a/examples/markdown/readme-python.md
+++ b/examples/markdown/readme-python.md
@@ -265,7 +265,7 @@ The `readme-ai` project is an automated README generator leveraging AI to synthe
| --- | --- |
| [prompts.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/prompts.toml) | The `prompts.toml` file in `readmeai/config/settings` provides templates for generating README content. It includes prompts for creating a project logo and a Markdown table summarizing key project features. The file aims to streamline the process of crafting engaging project documentation. |
| [parsers.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/parsers.toml) | Parse and analyze project configuration and dependency files for various CI/CD, configuration, infrastructure, monitoring, and orchestration setups. |
-| [blacklist.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/blacklist.toml) | Excludes specified directories, file extensions, and names from preprocessing. |
+| [ignore_list.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/ignore_list.toml) | Excludes specified directories, file extensions, and names from preprocessing. |
| [languages.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/languages.toml) | Defines programming language extensions and their names for the project. |
| [config.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/config.toml) | This code file configures settings for the README AI project, including file resources, Git repository, language model API, and markdown templates. |
| [markdown.toml](https://github.com/eli64s/readme-ai/blob/master/readmeai/config/settings/markdown.toml) | This code file generates a README.md template for the parent repository, showcasing project details and badges. |
diff --git a/examples/markdown/readme-readmeai.md b/examples/markdown/readme-readmeai.md
new file mode 100644
index 00000000..d7b0f5d2
--- /dev/null
+++ b/examples/markdown/readme-readmeai.md
@@ -0,0 +1,464 @@
+[]()
+
+## README-AI
+
+ *Empowering READMEs with AI magic!*
+
++ + + + +
+ ++ +
++
+ Empower Your SQL, Automate with Confidence! +
++ + + + +
++ Built with the tools and technologies: +
++ + + + + +
+ ++ +
++
+ Empower READMEs with AI magic, effortlessly. +
++ + + + +
++ Built with the tools and technologies: +
++ + + + +
+ ++ + + + +
+ +โฏ REPLACE-ME
",
+ description="Placeholder image for missing content.",
+ )
quickstart: str
- shields_icons: str
+ requirements: str = Field(
+ default="",
+ description="Project system prerequisites.",
+ )
+ shieldsio_icons: str
skill_icons: str
- slogan: str
- tables: str
- toc: str
+ slogan: str = Field(
+ default="โฏ INSERT-PROJECT-SLOGAN",
+ description="Project tagline or slogan.",
+ )
+ tables: str = Field(default="", description="Markdown table options.")
+ toc_style: str = Field(
+ default="bullet",
+ description="Table of contents content.",
+ )
tree: str
- tree_depth: int
- width: str
+ tree_depth: PositiveInt = Field(
+ default=2,
+ ge=1,
+ le=5,
+ description="Depth of directory tree.",
+ )
+
+ @field_validator("badge_color")
+ def set_color(cls, value: str) -> str:
+ """
+ Validates badge color value and returns the hex code.
+ """
+ try:
+ return Color(value).as_hex().strip("#")
+ except ValueError as exc:
+ _logger.error(f"Invalid color value '{value}': {exc}")
+ return cls.model_fields["badge_color"].default
+
+ @model_validator(mode="after")
+ def set_width(self) -> MarkdownSettings:
+ """
+ Validates and sets the width for the project logo image.
+ """
+ if str(self.image).lower() == ImageOptions.LLM.name.lower():
+ self.image_width = "60%"
+ return self
class ModelSettings(BaseModel):
- """LLM API settings used for generating text for the README.md file."""
+ """
+ LLM API model settings and parameters.
+ """
- api: Optional[str]
- base_url: Optional[HttpUrl]
- context_window: Optional[int]
- encoder: Optional[str]
- model: Optional[str]
- temperature: Optional[float]
- tokens: Optional[int]
- top_p: Optional[float]
+ api: str | None = Field(
+ default=ModelOptions.OFFLINE,
+ description="API key for the LLM model.",
+ )
+ base_url: str
+ context_window: PositiveInt
+ encoder: str
+ host_name: AnyHttpUrl
+ localhost: AnyHttpUrl
+ model: str
+ path: str
+ temperature: float
+ tokens: PositiveInt
+ top_p: PositiveFloat
class Settings(BaseModel):
- """Nested data model to store all configuration settings."""
+ """
+ Pydantic settings model for the readme-ai package.
+ """
api: APISettings
files: FileSettings
@@ -119,62 +282,55 @@ class Settings(BaseModel):
llm: ModelSettings
md: MarkdownSettings
- class Config:
- """Pydantic configuration settings."""
-
- validate_assignment = True
+ model_config = ConfigDict(
+ validate_assignment=True,
+ )
class ConfigLoader:
- """Loads the configuration settings for the CLI."""
+ """
+ Loads the configuration settings for the readme-ai package.
+ """
def __init__(
self,
- config_file: Union[str, Path] = "config.toml",
- sub_module: str = "settings",
+ config_file: str = "config.toml",
+ submodule: str = "settings",
) -> None:
"""Initialize ConfigLoader with the base configuration file."""
- self._logger = Logger(__name__)
self.file_handler = FileHandler()
self.config_file = config_file
- self.sub_module = sub_module
- self.config = self._base_config
+ self.submodule = submodule
+ self.config = self._load_config
self.load_settings()
@cached_property
- def _base_config(self) -> Settings:
+ def _load_config(self) -> Settings:
"""Loads the base configuration file."""
file_path = get_resource_path(
- file_path=self.config_file, sub_module=self.sub_module
+ file_path=self.config_file,
+ submodule=self.submodule,
)
- config_dict = self.file_handler.read(file_path)
- return Settings.parse_obj(config_dict)
+ config_dict = self.file_handler.read(str(file_path))
+ return Settings.model_validate(config_dict)
def load_settings(self) -> dict[str, dict]:
- """Loads all configuration settings.
-
- - Loads the base configuration file from `settings/config.toml`.
- - Loads any additional configuration files specified in the base settings
- under the `files` key.
-
- Returns:
- A dictionary containing all loaded configuration settings, where
- the keys are the section names from `Settings` and the values
- are their respective data dictionaries.
"""
- settings = self._base_config.dict()
+ Loads all configuration settings.
+ 1. Loads the base configuration file from 'settings/config.toml'.
+ 2. Loads all additional TOML files defined in 'FileSettings.'
+ """
+ settings = self._load_config.model_dump()
- for key, file_name in settings["files"].items():
- if not file_name.endswith(".toml"):
+ for key, file_path in settings["files"].items():
+ if not file_path.endswith(".toml"):
continue
- file_path = get_resource_path(
- file_path=file_name,
- )
- data_dict = self.file_handler.read(file_path)
- settings[key] = data_dict
- setattr(self, key, data_dict)
- self._logger.info(
- f"Loaded configuration file: {self.sub_module}/{file_name}"
- )
+
+ file_path = get_resource_path(file_path=file_path)
+ data_config = self.file_handler.read(file_path)
+ settings[key] = data_config
+ setattr(self, key, data_config)
+
+ _logger.info(f"Config loaded: {self.submodule}/{file_path}")
return settings
diff --git a/readmeai/config/settings/commands.toml b/readmeai/config/settings/commands.toml
index facafed1..4b81a74f 100644
--- a/readmeai/config/settings/commands.toml
+++ b/readmeai/config/settings/commands.toml
@@ -1,7 +1,7 @@
# Programming language install, run, and test commands.-vs-binding = {
[quickstart_guide]
-default = ["> INSERT-INSTALL-COMMANDS", "> INSERT-RUN-COMMANDS", "> INSERT-TEST-COMMANDS"]
+default = ["โฏ INSERT-INSTALL-COMMANDS", "โฏ INSERT-RUN-COMMANDS", "โฏ INSERT-TEST-COMMANDS"]
C = ["gcc -o myapp main.c", "./myapp", "/* No common unit test framework in C */"]
CPP = ["g++ -o myapp main.cpp", "./myapp", "googletest"]
CSharp = ["dotnet build", "dotnet run", "dotnet test"]
diff --git a/readmeai/config/settings/config.toml b/readmeai/config/settings/config.toml
index c3511b96..67dbe54a 100644
--- a/readmeai/config/settings/config.toml
+++ b/readmeai/config/settings/config.toml
@@ -1,17 +1,17 @@
# Default API Settings
[api]
-content = "You're a brilliant Tech Lead and Software Engineer with a passion for open-source projects."
rate_limit = 10
+system_message = "You're a brilliant Tech Lead and Software Engineer with a passion for open-source projects."
# File Resources
[files]
-blacklist = "blacklist.toml"
+ignore_list = "ignore_list.toml"
commands = "commands.toml"
languages = "languages.toml"
markdown = "markdown.toml"
parsers = "parsers.toml"
prompts = "prompts.toml"
-shields_icons = "icons.json"
+shieldsio_icons = "shieldsio_icons.json"
skill_icons = "skill_icons.json"
# Git Repository Settings
@@ -21,91 +21,29 @@ repository = "https://github.com/eli64s/readme-ai"
# Language Model API Settings
[llm]
api = "openai"
-base_url = "https://api.openai.com/v1/chat/completions"
-context_window = 4000
+base_url = "https://api.openai.com/v1/engines/"
+context_window = 3999
encoder = "cl100k_base"
+host_name = "https://api.openai.com/"
+localhost = "http://localhost:11434/"
model = "gpt-3.5-turbo"
-temperature = 0.9
-tokens = 650
+path = "v1/chat/completions"
+temperature = 0.0
+tokens = 699
top_p = 0.9
# Markdown Template Settings
[md]
-# Markdown Default Settings
-alignment = "center"
-emojis = false
-image = ""
-placeholder = "โบ INSERT-TEXT-HERE
"
-slogan = ""
-tables = ""
-width = "100"
-
-# Header Template
-header = """\
-- -
--
- {slogan} -
-\n\t{shields_icons}
-
\n\t{badge_icons}
-""" - # Badges -badge_color = "0080ff" -badge_style = "flat" -badge_icons = """\tDeveloped with the software and tools below.\n\n\n\t{badge_icons}""" -shields_icons = """ +badge_icons = """\tBuilt with the tools and technologies:\n
\n\n\t{badge_icons}"""
+shieldsio_icons = """
\t
\t
\t
"""
skill_icons = """\n\t\t\n\t"""
-
-# Table of Contents
-quick_links = """
-## ๐ Quick Links
-
-> - [๐ Overview](#-overview)
-> - [๐งฉ Features](#-features)
-> - [๐๏ธ Repository Structure](#๏ธ-repository-structure)
-> - [๐ฆ Modules](#-modules)
-> - [๐ Getting Started](#-getting-started)
-> - [โ๏ธ Installation](#๏ธ-installation)
-> - [๐ค Usage](#-usage)
-> - [๐งช Tests](#-tests)
-> - [๐ Project Roadmap](#-project-roadmap)
-> - [๐ค Contributing](#-contributing)
-> - [๐ License](#-license)
-> - [๐ Acknowledgments](#-acknowledgments)
-
----
-"""
-
-toc = """
-Table of Contents
-
-- [๐ Overview](#-overview)
-- [๐งฉ Features](#-features)
-- [๐๏ธ Repository Structure](#๏ธ-repository-structure)
-- [๐ฆ Modules](#-modules)
-- [๐ Getting Started](#-getting-started)
- - [โ๏ธ Installation](#๏ธ-installation)
- - [๐ค Usage](#-usage)
- - [๐งช Tests](#-tests)
-- [๐ Project Roadmap](#-project-roadmap)
-- [๐ค Contributing](#-contributing)
-- [๐ License](#-license)
-- [๐ Acknowledgments](#-acknowledgments)
-
source
source
+
@@ -253,12 +189,10 @@ This project is protected under the [SELECT-A-LICENSE](https://choosealicense.co
---
-## ๐ Acknowledgments
+## ๐ Acknowledgments
- List any resources, contributors, inspiration, etc. here.
-[**Return**](#-overview)
-
---
"""
@@ -269,7 +203,7 @@ contact = """
+
""" -custom_badge = """""" +cli_docs = """ +#### Command-Line Interface + +The project supports the following command-line interface options: + +```sh +โฏ {cli_command} +``` + +--- +""" + +api_docs = """ +#### API Documentation + +The project API documentation is available at: [API Documentation]({api_url}). + +--- +""" + +test_framework = """ +### ๐งช Testing + +This project uses **`{test_framework}`** for testing. + +- **`{test_framework}`** + +Execute the test suite using the following command: + +```sh +โฏ {test_command} +``` + +--- +""" diff --git a/readmeai/config/settings/blacklist.toml b/readmeai/config/settings/ignore_list.toml similarity index 99% rename from readmeai/config/settings/blacklist.toml rename to readmeai/config/settings/ignore_list.toml index e08aeb79..dc91b4db 100644 --- a/readmeai/config/settings/blacklist.toml +++ b/readmeai/config/settings/ignore_list.toml @@ -1,6 +1,6 @@ # Directories, file extensions, and file names to be excluded from preprocessing. -[blacklist] +[ignore_list] directories = [ ".DS_Store", ".dvc", diff --git a/readmeai/config/settings/markdown.toml b/readmeai/config/settings/markdown.toml index bf221c2b..c8a02720 100644 --- a/readmeai/config/settings/markdown.toml +++ b/readmeai/config/settings/markdown.toml @@ -1,34 +1,24 @@ # Markdown template code blocks to construct a README.md file. [md] -# Markdown Default Settings -alignment = "center" -emojis = false -image = "" -placeholder = "โบ INSERT-TEXT-HERE
"
-slogan = ""
-tables = ""
-
# Header Template
header = """\
-- +
+
--
+
+
{slogan}
-\n\t{shields_icons}
-
\n\t{badge_icons}
+\n\t{shieldsio_icons}
+
\n\t{badge_icons}
""" # Badges -badge_color = "0080ff" -badge_style = "flat" -badge_icons = """\tDeveloped with the software and tools below.\n\n\n\t{badge_icons}""" -shields_icons = """ +badge_icons = """\tDeveloped with the software and tools below.\n
\n\n\t{badge_icons}"""
+shieldsio_icons = """
\t
\t
@@ -36,46 +26,6 @@ shields_icons = """
"""
skill_icons = """\n\t\t\n\t"""
-# Table of Contents
-quick_links = """
-## ๐ Quick Links
-
-> - [๐ Overview](#-overview)
-> - [๐งฉ Features](#-features)
-> - [๐๏ธ Repository Structure](#-repository-structure)
-> - [๐ฆ Modules](#-modules)
-> - [๐ Getting Started](#-getting-started)
-> - [โ๏ธ Installation](#๏ธ-installation)
-> - [๐ค Usage](#-usage)
-> - [๐งช Tests](#-tests)
-> - [๐ Project Roadmap](#-project-roadmap)
-> - [๐ค Contributing](#-contributing)
-> - [๐ License](#-license)
-> - [๐ Acknowledgments](#-acknowledgments)
-
----
-"""
-toc = """\
-
-Table of Contents
-
-- [๐ Overview](#-overview)
-- [๐งฉ Features](#-features)
-- [๐๏ธ Repository Structure](#-repository-structure)
-- [๐ฆ Modules](#-modules)
-- [๐ Getting Started](#-getting-started)
- - [โ๏ธ Installation](#๏ธ-installation)
- - [๐ค Usage](#-usage)
- - [๐งช Tests](#-tests)
-- [๐ Project Roadmap](#-project-roadmap)
-- [๐ค Contributing](#-contributing)
-- [๐ License](#-license)
-- [๐ Acknowledgments](#-acknowledgments)
-
source
source
+
@@ -216,13 +177,13 @@ Contributions are welcome! Here are several ways you can contribute:
---
-## ๐ License
+## ๐ License
This project is protected under the [SELECT-A-LICENSE](https://choosealicense.com/licenses) License. For more details, refer to the [LICENSE](https://choosealicense.com/licenses/) file.
---
-## ๐ Acknowledgments
+## ๐ Acknowledgments
- List any resources, contributors, inspiration, etc. here.
@@ -238,7 +199,7 @@ contact = """
+
@@ -257,3 +218,7 @@ contributor_graph = """
"""
custom_badge = """"""
+
+slogan = ""
+
+tables = ""
diff --git a/readmeai/config/settings/prompts.toml b/readmeai/config/settings/prompts.toml
index 29c62f84..fb01c4f5 100644
--- a/readmeai/config/settings/prompts.toml
+++ b/readmeai/config/settings/prompts.toml
@@ -38,18 +38,29 @@ each response with a verb or a noun to make the summary more engaging and impact
- Do not include quotes, code snippets, or bullet points in your response. \
- Your response should be a maximum of 50 words.
"""
+logo = """
+Create a **simple, cute mascot icon** in a **minimalist style** specifically designed as a standalone iPhone app icon. The icon should have a **singular main centerpiece** that defines the entire shape of the icon, without any external borders or background.
-logo = """Design a square app logo for the software project - "{project_name}". \
-The logo should feature the project name in bold and easily readable letters. \
-The color scheme should be engaging and suitable for a technology-focused app, \
-with a white background behind the stylized app logo square with rounded corners. \
-While designing the logo, please reference the following codebase details: \n
---------------------------------------------------------------------------------
-Repository Details:
-Project Name: {project_name}
-Project Overview: {project_overview}
-Project Catch Phrase/Slogan: {project_slogan}
---------------------------------------------------------------------------------
+**Characteristics:**
+1. Use **thick, rounded outlines** to define the mascot's shape.
+2. Employ **basic geometric shapes** for facial features (e.g., dots for eyes, simple shapes for nose/mouth).
+3. Ensure a **chunky, squat body proportion** for a cute appearance.
+4. Incorporate **distinctive features** of the mascot animal/character/hero in a **simplified form**.
+5. Maintain a **friendly, approachable expression**.
+6. Fill the **square app-icon canvas** with the mascot, centered and occupying most of the space.
+7. The mascot's shape itself should form the icon boundaryโ**no additional borders or background elements**.
+8. The design should be **visually complete and balanced** without relying on a background.
+
+**Mascot Concept:**
+- Design a [insert specific animal or character relevant to your project, e.g., 'playful robot', 'curious owl', 'friendly octopus'].
+- The aesthetic should be **clean, adorable, and instantly recognizable** even at small sizes.
+
+**Project Context:**
+- **Project Name:** {project_name}
+- **Project Overview:** {project_overview}
+- **Project Slogan:** {project_slogan}
+
+The mascot should embody the spirit of your project, capturing its essence and purpose. The final design must function as a standalone icon, with the mascot's shape defining the boundaries and serving as the singular main centerpiece.
"""
overview = """Analyze the codebase, {0}, and provide a robust, yet succinct overview of the software \
@@ -76,24 +87,3 @@ Other Requirements: \n
- Your response should be a maximum of 8 words.
- The slogan should be concise and memorable.
"""
-
-mermaid = """Create a visual representation of the software project '{0}' using a flowchart diagram. \
-The diagram should clearly illustrate the main components and the flow of data or control between them, \
-representing the codebase's architecture and execution path. The output should be formatted as a Markdown code block with Mermaid syntax. \
-Below is a template for the Mermaid flowchart that you can customize based on the project's specifics: \n
---------------------------------------------------------------------------------
-
-```mermaid
-flowchart LR
-
-A[Hard] -->|Text| B(Round)
-B --> C{Decision}
-C -->|One| D[Result 1]
-C -->|Two| E[Result 2]
-```
-
---------------------------------------------------------------------------------
-While generating the diagram, please reference the following codebase details:
-File Summaries: {1}
---------------------------------------------------------------------------------
-"""
diff --git a/readmeai/config/settings/quickstart.toml b/readmeai/config/settings/quickstart.toml
new file mode 100644
index 00000000..08be6edd
--- /dev/null
+++ b/readmeai/config/settings/quickstart.toml
@@ -0,0 +1,487 @@
+[default]
+tool = "Default"
+install = "echo 'No specific installation instructions available'"
+run = "echo 'No specific run instructions available'"
+test = "echo 'No specific test instructions available'"
+shield = "https://img.shields.io/badge/Tool-Generic-lightgrey?style={badge_style}"
+website = "https://example.com"
+
+[bash]
+tool = "Bash"
+install = "./scripts/install.sh"
+run = "./scripts/run.sh"
+test = "./scripts/test.sh"
+shield = "https://img.shields.io/badge/Shell_Script-121011.svg?style={badge_style}&logo=gnu-bash&logoColor=white"
+website = "https://www.gnu.org/software/bash/"
+
+[dockerfile]
+tool = "Docker"
+install = "docker build -t {project_name} ."
+run = "docker run -it {project_name}"
+test = "docker exec -it {project_name} pytest"
+shield = "https://img.shields.io/badge/Docker-2CA5E0.svg?style={badge_style}&logo=docker&logoColor=white"
+website = "https://www.docker.com/"
+
+[docker_compose_yml]
+tool = "Docker Compose"
+install = "docker-compose build"
+run = "docker-compose up"
+test = "docker-compose run --rm {service} pytest"
+shield = "https://img.shields.io/badge/Docker_Compose-2CA5E0.svg?style={badge_style}&logo=docker&logoColor=white"
+website = "https://docs.docker.com/compose/"
+
+[docker_compose_yaml]
+tool = "Docker Compose"
+install = "docker-compose build"
+run = "docker-compose up"
+test = "docker-compose run --rm {service} pytest"
+shield = "https://img.shields.io/badge/Docker_Compose-2CA5E0.svg?style={badge_style}&logo=docker&logoColor=white"
+website = "https://docs.docker.com/compose/"
+
+[makefile]
+tool = "Make"
+install = "make install"
+run = "make run"
+test = "make test"
+shield = "https://img.shields.io/badge/GNU_Make-A8B9CC.svg?style={badge_style}&logo=gnu-make&logoColor=white"
+website = "https://www.gnu.org/software/make/"
+
+[poetry_lock]
+tool = "Poetry"
+install = "poetry install"
+run = "poetry run python {entrypoint}"
+test = "poetry run pytest"
+shield = "https://img.shields.io/badge/Poetry-3B5526.svg?style={badge_style}&logo=poetry&logoColor=white"
+website = "https://python-poetry.org/"
+
+[pipfile_lock]
+tool = "Pipenv"
+install = "pipenv install"
+run = "pipenv shell && pipenv run python {entrypoint}"
+test = "pipenv shell && pipenv run pytest"
+shield = "https://img.shields.io/badge/Pipenv-3775A9.svg?style={badge_style}&logo=pypi&logoColor=white"
+website = "https://pipenv.pypa.io/"
+
+[requirements_txt]
+tool = "Pip"
+install = "pip install -r requirements.txt"
+run = "python {entrypoint}"
+test = "pytest"
+shield = "https://img.shields.io/badge/pip-3775A9.svg?style={badge_style}&logo=pypi&logoColor=white"
+website = "https://pip.pypa.io/"
+
+[environment_yaml]
+tool = "Conda"
+install = "conda env create -f environment.yaml"
+run = "conda activate
+
+
+
" if i + badges_per_line < total else f"{line}\n"
+ f"{line}\n\t
" if i + badges_per_line < total else f"{line}\n",
)
return "\n\t".join(lines)
def build_default_badges(
- config: ConfigLoader, full_name: str, host: str
+ config: Settings,
+ full_name: str,
+ host: str,
) -> str:
"""Build metadata badges using shields.io."""
- return config.md.shields_icons.format(
+ return config.md.shieldsio_icons.format(
host=host,
full_name=full_name,
badge_color=config.md.badge_color,
@@ -50,7 +49,9 @@ def build_default_badges(
def build_project_badges(
- dependencies: list[str], icons: dict[str, str], style: str
+ dependencies: list[str],
+ icons: dict[str, str],
+ style: str,
) -> str:
"""Build HTML badges for project dependencies."""
badges = [
@@ -65,14 +66,17 @@ def build_project_badges(
return _format_badges(badges)
-def shields_icons(
- conf: ConfigLoader, dependencies: list, full_name: str, git_host: str
-) -> Tuple[str, str]:
+def shieldsio_icons(
+ conf: Settings,
+ dependencies: list,
+ full_name: str,
+ git_host: str,
+) -> tuple[str, str]:
"""
Generates badges for the README using shields.io icons.
"""
icons_path = get_resource_path(
- conf.files.shields_icons,
+ conf.files.shieldsio_icons,
_package,
_submodule,
)
@@ -81,31 +85,34 @@ def shields_icons(
default_icons = build_default_badges(conf, full_name, git_host)
project_badges = build_project_badges(
- dependencies, icons_dict, conf.md.badge_style
+ dependencies,
+ icons_dict,
+ conf.md.badge_style,
)
project_badges = conf.md.badge_icons.format(
- alignment=conf.md.alignment, badge_icons=project_badges
+ align=conf.md.align,
+ badge_icons=project_badges,
)
if (
conf.md.badge_style == BadgeOptions.DEFAULT.value
- and git_host != GitHost.LOCAL
+ and git_host != GitHost.LOCAL.name
):
return (
default_icons,
"\n",
)
- if git_host == GitHost.LOCAL:
+ if git_host == GitHost.LOCAL.name:
return (
- "\n",
+ "",
project_badges,
)
return default_icons, project_badges
-def skill_icons(conf: ConfigLoader, dependencies: list) -> str:
+def skill_icons(conf: Settings, dependencies: list) -> str:
"""
Generates badges for the README using skill icons, from the
repository - https://github.com/tandpfun/skill-icons.
@@ -113,21 +120,23 @@ def skill_icons(conf: ConfigLoader, dependencies: list) -> str:
dependencies.extend(["md"])
icons_path = get_resource_path(
- conf.files.skill_icons, _package, _submodule
+ conf.files.skill_icons,
+ _package,
+ _submodule,
)
icons_dict = FileHandler().read(icons_path)
- skill_icons = [
+ icons = [
icon for icon in icons_dict["icons"]["names"] if icon in dependencies
]
- skill_icons = ",".join(skill_icons)
- skill_icons = icons_dict["url"]["base_url"] + skill_icons
+ formatted_icons = icons_dict["url"]["base_url"] + ",".join(icons)
if conf.md.badge_style == "skills-light":
- skill_icons = f"{skill_icons}&theme=light"
+ formatted_icons = f"{formatted_icons}&theme=light"
- conf.md.skill_icons = conf.md.skill_icons.format(skill_icons)
+ conf.md.skill_icons = conf.md.skill_icons.format(formatted_icons)
return conf.md.badge_icons.format(
- alignment=conf.md.alignment, badge_icons=conf.md.skill_icons
+ align=conf.md.align,
+ badge_icons=conf.md.skill_icons,
)
diff --git a/readmeai/generators/builder.py b/readmeai/generators/builder.py
index 40826c09..7cf4a50f 100644
--- a/readmeai/generators/builder.py
+++ b/readmeai/generators/builder.py
@@ -1,15 +1,17 @@
-"""Builds each section of the README Markdown file."""
+"""
+Builds each section of the README Markdown file.
+"""
__package__ = "readmeai"
from pathlib import Path
-from typing import List
-from readmeai.cli.options import BadgeOptions
-from readmeai.config.settings import ConfigLoader
+from readmeai.config.settings import BadgeOptions, ConfigLoader
from readmeai.generators import badges, tables, tree, utils
from readmeai.generators.quickstart import get_setup_data
-from readmeai.services.git import GitHost
+from readmeai.templates.header import HeaderTemplate
+from readmeai.templates.toc import ToCTemplate
+from readmeai.vcs.providers import GitHost
class MarkdownBuilder:
@@ -18,11 +20,10 @@ class MarkdownBuilder:
def __init__(
self,
config_loader: ConfigLoader,
- dependencies: List[str],
- summaries: tuple,
+ dependencies: list[str],
+ summaries: list[tuple[str, str]],
temp_dir: str,
):
- """Initializes the MarkdownBuilder class."""
self.deps = dependencies
self.summaries = summaries
self.temp_dir = Path(temp_dir)
@@ -35,29 +36,67 @@ def __init__(
if self.git.host_domain != GitHost.LOCAL.name.lower()
else f"../{self.git.name}"
)
+ self.header_template = HeaderTemplate(self.md.header_style)
+ self.toc_template = ToCTemplate(self.md.toc_style)
@property
def md_header(self) -> str:
- """Generates the README header section."""
+ """
+ Generates the README header section.
+ """
if BadgeOptions.SKILLS.value not in self.md.badge_style:
- md_shields, md_badges = badges.shields_icons(
- self.config, self.deps, self.git.full_name, self.git.host
+ md_shields, md_badges = badges.shieldsio_icons(
+ self.config,
+ self.deps,
+ str(self.git.full_name),
+ str(self.git.host),
)
else:
md_shields = (
- ""
+ ""
)
md_badges = badges.skill_icons(self.config, self.deps)
- return self.md.header.format(
- alignment=self.md.alignment,
- image=self.md.image,
- width=self.md.width,
- repo_name=self.git.name.upper(),
- slogan=self.md.slogan,
- shields_icons=md_shields,
- badge_icons=md_badges,
- )
+ header_data = {
+ "align": self.md.align,
+ "image": self.md.image,
+ "image_width": self.md.image_width,
+ "repo_name": self.git.name.upper()
+ if self.git.name
+ else self.md.placeholder,
+ "slogan": self.md.slogan,
+ "shields_icons": md_shields,
+ "badge_icons": md_badges,
+ }
+ return self.header_template.render(header_data)
+
+ @property
+ def md_toc(self) -> str:
+ """
+ Generates the README Table of Contents section.
+ """
+ toc_data = {
+ "sections": [
+ {"title": "๐ Overview"},
+ {"title": "๐พ Features"},
+ {"title": "๐ Repository Structure"},
+ {"title": "๐งฉ Modules"},
+ {
+ "title": "๐ Getting Started",
+ "subsections": [
+ {"title": "๐ Prerequisites"},
+ {"title": "๐ฆ Installation"},
+ {"title": "๐ค Usage"},
+ {"title": "๐งช Tests"},
+ ],
+ },
+ {"title": "๐ Project Roadmap"},
+ {"title": "๐ค Contributing"},
+ {"title": "๐ License"},
+ {"title": "๐ Acknowledgments"},
+ ],
+ }
+ return self.toc_template.render(toc_data)
@property
def md_summaries(self) -> str:
@@ -92,10 +131,10 @@ def md_quickstart(self) -> str:
return self.md.quickstart.format(
repo_name=self.git.name,
repo_url=self.repo_url,
+ prerequisites=setup_data.prerequisites,
install_command=setup_data.install_command,
run_command=setup_data.run_command,
test_command=setup_data.test_command,
- system_requirements=setup_data.prerequisites,
)
@property
@@ -110,10 +149,12 @@ def md_contributing(self) -> str:
)
def build(self) -> str:
- """Builds the README Markdown file."""
+ """
+ Builds each section of the README.md file.
+ """
md_contents = [
self.md_header,
- self.md.toc.format(repo_name=self.git.name),
+ self.md_toc,
self.md.overview,
self.md.features,
self.md_tree,
diff --git a/readmeai/generators/quickstart.py b/readmeai/generators/quickstart.py
index 5f383b1f..abee0ed2 100644
--- a/readmeai/generators/quickstart.py
+++ b/readmeai/generators/quickstart.py
@@ -1,9 +1,10 @@
-"""Dynamically creates the 'Quickstart' section of the README file."""
+"""
+Dynamically generate 'Quickstart' guides for the README file.
+"""
import traceback
from dataclasses import dataclass
from pathlib import Path
-from typing import Dict, List
from readmeai.config.settings import ConfigLoader
from readmeai.core.logger import Logger
@@ -13,60 +14,65 @@
@dataclass
class QuickStart:
- """Information about using, running, and testing a repository."""
+ """
+ Information about using, running, and testing a repository.
+ """
install_command: str
run_command: str
test_command: str
prerequisites: str
- language_counts: Dict[str, int]
- language_key: str
- language_name: str = None
+ language_counts: dict[str, int]
+ language_key: str | None
+ language_name: str | None = None
def count_languages(
- summaries: List[str], config_loader: ConfigLoader
-) -> Dict[str, int]:
+ summaries: tuple,
+ config_loader: ConfigLoader,
+) -> dict[str, int]:
"""
Counts the occurrences of each language in the summaries.
"""
parser_files = config_loader.parsers.get("parsers")
- language_counts = {}
+ language_counts: dict[str, int] = {}
for file_path, _ in summaries:
- language = Path(file_path).suffix[1:]
+ language = Path(file_path).suffix[1:] or None
if str(file_path) in [
dependency_file for dependency_file in parser_files
]:
continue
- if language and language not in config_loader.blacklist:
+ if (
+ language
+ and language.strip()
+ and language not in config_loader.ignore_list
+ ):
language_counts[language] = language_counts.get(language, 0) + 1
return language_counts
-def get_top_language(language_counts: Dict[str, int]) -> str:
+def get_top_language(language_counts: dict[str, int]) -> str | None:
"""
Determines the top language.
"""
if not language_counts:
return None
-
- return max(sorted(language_counts), key=language_counts.get)
+ else:
+ return max(sorted(language_counts), key=language_counts.get)
def get_top_language_setup(
- language_counts: Dict[str, int], config_loader: ConfigLoader
+ language_counts: dict,
+ config_loader: ConfigLoader,
) -> QuickStart:
"""
Determines the top language and retrieves its setup commands.
"""
- if not language_counts:
- return None
-
languages = config_loader.languages.get("language_names")
commands = config_loader.commands.get("quickstart_guide")
@@ -77,15 +83,16 @@ def get_top_language_setup(
return QuickStart(
*quickstart_commands,
- prerequisites,
- language_counts,
- language_key,
- language_name,
+ prerequisites=prerequisites,
+ language_counts=language_counts,
+ language_key=language_key,
+ language_name=language_name,
)
def get_setup_data(
- config_loader: ConfigLoader, summaries: List[str]
+ config_loader: ConfigLoader,
+ summaries: tuple,
) -> QuickStart:
"""
Generates the 'Quick Start' section of the README file.
diff --git a/readmeai/generators/assets/icons.json b/readmeai/generators/svg/shieldsio_icons.json
similarity index 100%
rename from readmeai/generators/assets/icons.json
rename to readmeai/generators/svg/shieldsio_icons.json
diff --git a/readmeai/generators/assets/skill_icons.json b/readmeai/generators/svg/skill_icons.json
similarity index 100%
rename from readmeai/generators/assets/skill_icons.json
rename to readmeai/generators/svg/skill_icons.json
diff --git a/readmeai/generators/tables.py b/readmeai/generators/tables.py
index ddba82d7..138b1bda 100644
--- a/readmeai/generators/tables.py
+++ b/readmeai/generators/tables.py
@@ -1,37 +1,101 @@
-"""Creates markdown tables to store LLM text responses in the README file."""
+"""
+Creates Markdown tables to store LLM text responses in the README file.
+"""
from pathlib import Path
-from typing import List, Tuple
-from readmeai.services.git import fetch_git_file_url
+from readmeai.core.logger import Logger
+from readmeai.vcs.url_builder import GitURL
+
+_logger = Logger(__name__)
def construct_markdown_table(
- data: List[Tuple[str, str]], repo_url: str, full_name: str
+ data: list[tuple[str, str]],
+ repo_path: str | Path,
+ full_name: str,
+ max_rows: int = 100,
) -> str:
- """Builds a Markdown table from the provided data."""
+ """
+ Builds a Markdown table to store LLM text responses in README file.
+ """
+ assert isinstance(data, list), "Data must be a list"
+ assert all(
+ isinstance(item, tuple) and len(item) == 2 for item in data
+ ), "Each data item must be a tuple of (str, str)"
+ assert isinstance(
+ repo_path,
+ str | Path,
+ ), "repo_path must be a string or Path"
+ assert isinstance(full_name, str), "full_name must be a string"
+ assert (
+ isinstance(max_rows, int) and max_rows > 0
+ ), "max_rows must be a positive integer"
+
+ if not data:
+ _logger.warning("Empty data provided for Markdown table")
+ return ""
+
+ is_local_repo = Path(repo_path).exists()
+
+ if not is_local_repo:
+ try:
+ git_url = GitURL.create(str(repo_path))
+ except ValueError:
+ _logger.error(f"Invalid Git repository URL: {repo_path}")
+ is_local_repo = True # Fallback to treating it as a local path
+
headers = ["File", "Summary"]
table_rows = [headers, ["---", "---"]]
- for module, summary in data:
- file_name = str(Path(module).name)
- if "invalid" in full_name.lower():
- return file_name
- host_url = fetch_git_file_url(module, full_name, repo_url)
- md_format_host_url = f"[{file_name}]({host_url})"
- table_rows.append([md_format_host_url, summary])
+ for module, summary in data[:max_rows]:
+ file_name = Path(module).name
+ if is_local_repo:
+ file_path = Path(repo_path) / module
+ md_format_file_url = f"[{file_name}]({file_path})"
+ else:
+ try:
+ file_url = git_url.get_file_url(module)
+ md_format_file_url = f"[{file_name}]({file_url})"
+ except ValueError as e:
+ _logger.error(f"Error generating file URL for {module}: {e}")
+ md_format_file_url = file_name
+
+ table_rows.append([md_format_file_url, summary])
+
+ if len(data) > max_rows:
+ _logger.warning(
+ f"Table truncated. Showing {max_rows} out of {len(data)} rows.",
+ )
+ table_rows.append(["...", "..."])
+
+ return _format_as_markdown_table(table_rows)
+
+
+def _format_as_markdown_table(rows: list[list[str]]) -> str:
+ """
+ Formats the given rows as a Markdown table.
+ """
+ assert len(rows) >= 3, "Table must have at least headers and separator"
+ assert all(
+ len(row) == len(rows[0]) for row in rows
+ ), "All rows must have the same number of columns"
- return format_as_markdown_table(table_rows)
+ return "\n".join(f"| {' | '.join(row)} |" for row in rows)
def extract_folder_name(module: str) -> str:
- """Extracts the folder name from a module path."""
+ """
+ Extracts the folder name from a module path.
+ """
path_parts = Path(module).parts
return ".".join(path_parts[:-1]) if len(path_parts) > 1 else "."
-def format_as_markdown_table(rows: List[List[str]]) -> str:
- """Formats rows of data as a Markdown table."""
+def format_as_markdown_table(rows: list[list[str]]) -> str:
+ """
+ Formats rows of data as a Markdown table.
+ """
max_column_widths = [
max(len(str(row[col])) for row in rows) for col in range(len(rows[0]))
]
@@ -40,7 +104,7 @@ def format_as_markdown_table(rows: List[List[str]]) -> str:
"| "
+ " | ".join(
str(item).ljust(width)
- for item, width in zip(row, max_column_widths)
+ for item, width in zip(row, max_column_widths, strict=False)
)
+ " |"
for row in rows
@@ -50,9 +114,12 @@ def format_as_markdown_table(rows: List[List[str]]) -> str:
def format_code_summaries(
- placeholder: str, code_summaries: Tuple[str, str]
-) -> List[Tuple[str, str]]:
- """Converts the given code summaries into a formatted list."""
+ placeholder: str,
+ code_summaries: list[tuple[str, str]],
+) -> list[tuple[str, str]]:
+ """
+ Converts the given code summaries into a formatted list.
+ """
formatted_summaries = []
for summary in code_summaries:
@@ -68,17 +135,21 @@ def format_code_summaries(
def generate_markdown_tables(
table_widget: str,
- summaries: List[Tuple[str, str]],
+ summaries: list[tuple[str, str]],
project_name: str,
repository_url: str,
) -> str:
- """Produces Markdown tables for each project sub-directory."""
+ """
+ Produces Markdown tables for each project sub-directory.
+ """
summaries_by_folder = group_summaries_by_folder(summaries)
markdown_tables = []
for folder, entries in summaries_by_folder.items():
table_in_markdown = construct_markdown_table(
- entries, repository_url, project_name
+ entries,
+ repository_url,
+ project_name,
)
table_wrapper = table_widget.format(folder, table_in_markdown)
markdown_tables.append(table_wrapper)
@@ -86,15 +157,19 @@ def generate_markdown_tables(
return "\n".join(markdown_tables)
-def group_summaries_by_folder(summaries: List[Tuple[str, str]]) -> dict:
- """Groups code summaries by their sub-directory."""
- folder_map = {}
+def group_summaries_by_folder(summaries: list[tuple[str, str]]) -> dict:
+ """
+ Groups code summaries by their sub-directory.
+ """
+ folder_map: dict[str, list[tuple[str, str]]] = {}
for module, summary in summaries:
folder_name = extract_folder_name(module)
folder_map.setdefault(folder_name, []).append((module, summary))
return folder_map
-def is_valid_tuple_summary(summary: Tuple[str, str]) -> bool:
- """Checks if a summary is a valid tuple format."""
+def is_valid_tuple_summary(summary: tuple[str, str]) -> bool:
+ """
+ Checks if a summary is a valid tuple format.
+ """
return isinstance(summary, tuple) and len(summary) == 2
diff --git a/readmeai/generators/tree.py b/readmeai/generators/tree.py
index 9f25f2bf..568991b8 100644
--- a/readmeai/generators/tree.py
+++ b/readmeai/generators/tree.py
@@ -1,13 +1,21 @@
-"""Generates a directory tree structure for a code repository."""
+"""
+Generates a directory tree structure for a code repository.
+"""
from pathlib import Path
class TreeGenerator:
- """Generates a directory tree structure for a code repository."""
+ """
+ Generates a directory tree structure for a code repository.
+ """
def __init__(
- self, repo_name: str, root_dir: Path, repo_url: Path, max_depth: int
+ self,
+ repo_name: str,
+ root_dir: Path,
+ repo_url: Path,
+ max_depth: int,
):
self.repo_name = repo_name
self.root_dir = root_dir
@@ -21,7 +29,9 @@ def _build_tree(
is_last: bool = True,
depth: int = 0,
) -> str:
- """Generates a tree structure for a given directory."""
+ """
+ Generates a tree structure for a given directory.
+ """
if depth > self.max_depth:
return ""
@@ -42,7 +52,10 @@ def _build_tree(
for index, child in enumerate(children):
child_prefix = prefix + (" " if is_last else "โ ")
child_tree = self._build_tree(
- child, child_prefix, index == len(children) - 1, depth + 1
+ child,
+ child_prefix,
+ index == len(children) - 1,
+ depth + 1,
)
if child_tree:
@@ -51,9 +64,12 @@ def _build_tree(
return "\n".join(parts)
def tree(self) -> str:
- """Generates and formats a tree structure."""
+ """
+ Generates and formats a tree structure.
+ """
md_tree = self._build_tree(self.root_dir)
formatted_md_tree = md_tree.replace(
- self.root_dir.name, f"{self.repo_name}/"
+ self.root_dir.name,
+ f"{self.repo_name}/",
)
return formatted_md_tree
diff --git a/readmeai/generators/utils.py b/readmeai/generators/utils.py
index 205f64df..a6c56812 100644
--- a/readmeai/generators/utils.py
+++ b/readmeai/generators/utils.py
@@ -1,7 +1,8 @@
-"""Utilities to remove default emojis from markdown content."""
+"""
+Utilities to remove default emojis from markdown content.
+"""
import re
-from typing import List
EMOJI_PATTERN = re.compile(
pattern="["
@@ -21,8 +22,10 @@
)
-def remove_emojis(md_content: List[str]) -> List[str]:
- """Removes emojis from the content list."""
+def remove_emojis(md_content: list[str]) -> list[str]:
+ """
+ Removes emojis from the content list.
+ """
modified_content = []
for section in md_content:
@@ -44,7 +47,8 @@ def split_markdown_headings(markdown_text: str) -> dict:
Splits a markdown document by level 2 headings into separate sections.
"""
sections = re.split(r"(?m)^## ", markdown_text)
- split_sections = {}
+
+ split_sections: dict[str, str] = {}
for section in sections:
if section.strip():
diff --git a/readmeai/models/dalle.py b/readmeai/models/dalle.py
index e4ab67ea..16d8a475 100644
--- a/readmeai/models/dalle.py
+++ b/readmeai/models/dalle.py
@@ -1,8 +1,10 @@
"""
-Multi-modal model for generating images using OpenAI's DALL-E model.
+Handler for generating images using OpenAI's DALL-E model.
"""
import os
+from collections.abc import Generator
+from contextlib import contextmanager
from openai import Client, OpenAIError
from requests import get
@@ -13,22 +15,33 @@
class DalleHandler:
- """Generates and downloads images using OpenAI's DALL-E model."""
+ """
+ Generates and downloads images using OpenAI's DALL-E model.
+ """
- def __init__(self, config: ConfigLoader) -> None:
+ def __init__(self, conf: ConfigLoader) -> None:
"""Initialize the ImageGenerator class."""
- self.client = Client(api_key=os.getenv("OPENAI_API_KEY"))
- self.conf = config
+ self.conf = conf
+ self.filename = f"{conf.config.git.name}.png"
self._logger = Logger(__name__)
self._model_settings()
def _model_settings(self) -> None:
"""Initializes the DALL-E settings."""
+ self.client = Client(api_key=os.getenv("OPENAI_API_KEY"))
self.model = "dall-e-3"
self.size = "1792x1024"
self.quality = "standard"
self.n = 1
+ @contextmanager
+ def use_api(self) -> Generator:
+ """Yields the DALL-E handler."""
+ try:
+ yield self
+ finally:
+ self._logger.debug(f"Closed {self.model.upper()} API session.")
+
def _build_payload(self) -> str:
"""Formats the prompt string using configuration data."""
return {
@@ -43,7 +56,7 @@ def _build_payload(self) -> str:
"n": self.n,
}
- def run(self) -> str:
+ def _make_request(self) -> str:
"""Generates an image and returns its URL."""
try:
payload = self._build_payload()
@@ -52,31 +65,31 @@ def run(self) -> str:
return response.data[0].url
else:
self._logger.error(
- f"Failed to generate {self.model.upper()} image: {response}"
+ f"Failed to generate {self.model.upper()} image: {response}",
)
return ImageOptions.BLUE.value
except (Exception, OpenAIError) as exc:
self._logger.error(
- f"{self.model.upper()} image generation error: {exc}"
+ f"{self.model.upper()} image generation error: {exc}",
)
return ImageOptions.BLUE.value
def download(self, image_url) -> str:
"""Downloads an image from the given URL."""
- filename = f"{self.conf.config.git.name}.png"
try:
response = get(image_url)
- if response.status_code == 200:
- with open(filename, "wb") as f:
+ status_code = response.status_code
+
+ if status_code == 200:
+ with open(self.filename, "wb") as f:
f.write(response.content)
- return filename
+ self._logger.info(f"Image downloaded at: {image_url}")
+ return self.filename
else:
- self._logger.error(
- f"Failed to download image: {response.status_code}"
- )
- return ImageOptions.BLUE.value
+ self._logger.error(f"Failed to download image: {status_code}")
except Exception as exc:
self._logger.error(f"Failed to download image: {exc}")
- return ImageOptions.BLUE.value
+
+ return ImageOptions.BLUE.value
diff --git a/readmeai/models/factory.py b/readmeai/models/factory.py
index 889d8425..f94c9e53 100644
--- a/readmeai/models/factory.py
+++ b/readmeai/models/factory.py
@@ -1,32 +1,38 @@
"""
-Model factory that returns the appropriate LLM handler based on CLI input.
+Factory class that selects appropriate LLM API service based on CLI input.
"""
+from typing import ClassVar
+
from readmeai._exceptions import UnsupportedServiceError
-from readmeai.cli.options import ModelOptions as llms
-from readmeai.config.settings import ConfigLoader
+from readmeai.config.settings import ConfigLoader, ModelOptions
from readmeai.core.models import BaseModelHandler
from readmeai.models.gemini import GeminiHandler
from readmeai.models.offline import OfflineHandler
from readmeai.models.openai import OpenAIHandler
-class ModelFactory:
- """Factory that returns the appropriate LLM handler based on CLI input."""
+class ModelRegistry:
+ """
+ Returns the appropriate LLM API handler based on CLI input.
+ """
- _model_map = {
- llms.OFFLINE.value: OfflineHandler,
- llms.OLLAMA.value: OpenAIHandler,
- llms.OPENAI.value: OpenAIHandler,
- llms.GEMINI.value: GeminiHandler,
+ _model_map: ClassVar[dict] = {
+ # ModelOptions.ANTHROPIC.value: AnthropicHandler,
+ ModelOptions.GEMINI.value: GeminiHandler,
+ ModelOptions.OFFLINE.value: OfflineHandler,
+ ModelOptions.OLLAMA.value: OpenAIHandler,
+ ModelOptions.OPENAI.value: OpenAIHandler,
}
@staticmethod
- def model_handler(conf: ConfigLoader) -> BaseModelHandler:
- """Returns the appropriate LLM API handler based on CLI input."""
- llm_handler = ModelFactory._model_map.get(conf.config.llm.api)
- if llm_handler is None:
+ def get_backend(conf: ConfigLoader) -> BaseModelHandler:
+ """
+ Returns the appropriate LLM API handler based on CLI input.
+ """
+ backend_service = ModelRegistry._model_map.get(conf.config.llm.api)
+ if backend_service is None:
raise UnsupportedServiceError(
- f"Unsupported LLM service provided: {conf.config.llm.api}"
+ f"Unsupported LLM service provided: {conf.config.llm.api}",
)
- return llm_handler(conf)
+ return backend_service(conf)
diff --git a/readmeai/models/gemini.py b/readmeai/models/gemini.py
index 4265f87a..40889bb2 100644
--- a/readmeai/models/gemini.py
+++ b/readmeai/models/gemini.py
@@ -3,7 +3,6 @@
"""
import os
-from typing import List, Tuple
import aiohttp
import google.generativeai as genai
@@ -15,6 +14,7 @@
)
from readmeai.config.settings import ConfigLoader
+from readmeai.core.logger import Logger
from readmeai.core.models import BaseModelHandler
from readmeai.models.tokens import token_handler
from readmeai.utils.text_cleaner import clean_response
@@ -26,11 +26,13 @@ class GeminiHandler(BaseModelHandler):
def __init__(self, config_loader: ConfigLoader) -> None:
"""Initializes the Gemini API handler."""
super().__init__(config_loader)
+ self._logger = Logger(__name__)
self._model_settings()
def _model_settings(self):
"""Initializes the Gemini API LLM settings."""
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+ api_key = os.getenv("GOOGLE_API_KEY")
+ genai.configure(api_key=api_key)
self.model = genai.GenerativeModel(self.config.llm.model)
self.temperature = self.config.llm.temperature
self.tokens = self.config.llm.tokens
@@ -39,10 +41,9 @@ def _model_settings(self):
async def _build_payload(self, prompt: str, tokens: int) -> dict:
"""Build payload for POST request to the Gemini API."""
return genai.types.GenerationConfig(
- # candidate_count=1,
- # stop_sequences=['x'],
max_output_tokens=self.tokens,
temperature=self.temperature,
+ top_p=self.top_p,
)
@retry(
@@ -53,19 +54,22 @@ async def _build_payload(self, prompt: str, tokens: int) -> dict:
aiohttp.ClientError,
aiohttp.ClientResponseError,
aiohttp.ClientConnectorError,
- )
+ ),
),
)
async def _make_request(
self,
- index: str,
- prompt: str,
- tokens: int,
- raw_files: List[Tuple[str, str]] = None,
- ) -> Tuple[str, str]:
- """Processes Gemini API responses and returns generated text."""
+ index: str | None,
+ prompt: str | None,
+ tokens: int | None,
+ raw_files: list[tuple[str, str]] | None,
+ ) -> list[tuple[str, str]]:
+ """
+ Processes Gemini API responses and returns generated text.
+ """
try:
prompt = await token_handler(self.config, index, prompt, tokens)
+
parameters = await self._build_payload(prompt, tokens)
async with self.rate_limit_semaphore:
@@ -74,15 +78,18 @@ async def _make_request(
generation_config=parameters,
)
response_text = response.text
+
self._logger.info(f"Response for '{index}':\n{response_text}")
+
return index, clean_response(index, response_text)
except (
aiohttp.ClientError,
aiohttp.ClientResponseError,
aiohttp.ClientConnectorError,
- ) as exc:
+ ):
self._logger.error(
- f"Error making request to Gemini API for `{index}`: {exc}"
+ f"Error processing request for prompt: {index}",
+ exc_info=True,
)
return index, self.config.md.placeholder
diff --git a/readmeai/models/offline.py b/readmeai/models/offline.py
index bf825edc..0e2af92f 100644
--- a/readmeai/models/offline.py
+++ b/readmeai/models/offline.py
@@ -2,7 +2,7 @@
Model handler for running the CLI without a LLM API service.
"""
-from typing import Any, Dict, List, Tuple
+from typing import Any
from readmeai.config.settings import ConfigLoader
from readmeai.core.models import BaseModelHandler
@@ -18,20 +18,22 @@ def __init__(self, config_loader: ConfigLoader) -> None:
def _model_settings(self):
"""Set default values for offline mode."""
- self.placeholder = self.config.md.placeholder
+ self.placeholder: str = self.config.md.placeholder
- async def _build_payload(self, prompt: str, tokens: int) -> Dict[str, Any]:
+ async def _build_payload(self, prompt: str, tokens: int) -> dict[str, Any]:
"""Builds the payload for the POST request to the LLM API."""
- ...
+ return {}
async def _make_request(
self,
- index: str = None,
- prompt: str = None,
- tokens: int = None,
- raw_files: List[Tuple[str, str]] = None,
- ) -> Tuple[str, str]:
- """Returns placeholder text where LLM API response would be."""
+ index: str | None,
+ prompt: str | None,
+ tokens: int | None,
+ raw_files: list[tuple[str, str]] | None,
+ ) -> list[tuple[str, str]]:
+ """
+ Returns placeholder text where LLM API response would be.
+ """
file_summaries = [
(str(file_path), self.placeholder) for file_path, _ in raw_files
]
diff --git a/readmeai/models/openai.py b/readmeai/models/openai.py
index eed26007..ca4a5e2f 100644
--- a/readmeai/models/openai.py
+++ b/readmeai/models/openai.py
@@ -3,7 +3,6 @@
"""
import os
-from typing import List, Tuple
import aiohttp
import openai
@@ -14,17 +13,16 @@
wait_exponential,
)
-from readmeai.cli.options import ModelOptions as llms
-from readmeai.config.settings import ConfigLoader
+from readmeai.config.settings import ConfigLoader, ModelOptions
from readmeai.core.models import BaseModelHandler
from readmeai.models.tokens import token_handler
from readmeai.utils.text_cleaner import clean_response
-_localhost = "http://localhost:11434/v1/"
-
class OpenAIHandler(BaseModelHandler):
- """OpenAI API LLM implementation."""
+ """
+ OpenAI API LLM implementation, with Ollama support.
+ """
def __init__(self, config_loader: ConfigLoader) -> None:
"""Initialize OpenAI API LLM handler."""
@@ -32,22 +30,26 @@ def __init__(self, config_loader: ConfigLoader) -> None:
self._model_settings()
def _model_settings(self):
- """Set default values for OpenAI API."""
+ """Setup configuration for OpenAI/OLLAMA LLM handlers."""
+ self.host_name = self.config.llm.host_name
+ self.localhost = self.config.llm.localhost
self.model = self.config.llm.model
+ self.path = self.config.llm.path
self.temperature = self.config.llm.temperature
self.tokens = self.config.llm.tokens
self.top_p = self.config.llm.top_p
- if self.config.llm.api == llms.OPENAI.name:
- self.endpoint = self.config.llm.base_url
- self.client = openai.OpenAI(
- api_key=os.environ.get("OPENAI_API_KEY")
- )
- elif self.config.llm.api == llms.OLLAMA.name:
- self.endpoint = f"{_localhost}chat/completions"
+ if self.config.llm.api == ModelOptions.OPENAI.name:
+ self.url = f"{self.host_name}{self.path}"
+ self.client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+ elif self.config.llm.api == ModelOptions.OLLAMA.name:
+ self.url = f"{self.localhost}{self.path}"
self.client = openai.OpenAI(
- base_url=_localhost, api_key=llms.OLLAMA.name
+ base_url=f"{self.localhost}v1",
+ api_key=ModelOptions.OLLAMA.name,
)
+
self.headers = {"Authorization": f"Bearer {self.client.api_key}"}
async def _build_payload(self, prompt: str, tokens: int) -> dict:
@@ -56,7 +58,7 @@ async def _build_payload(self, prompt: str, tokens: int) -> dict:
"messages": [
{
"role": "system",
- "content": self.sys_content,
+ "content": self.system_message,
},
{"role": "user", "content": prompt},
],
@@ -74,31 +76,32 @@ async def _build_payload(self, prompt: str, tokens: int) -> dict:
aiohttp.ClientResponseError,
aiohttp.ClientConnectorError,
openai.OpenAIError,
- )
+ ),
),
)
async def _make_request(
self,
- index: str,
- prompt: str,
- tokens: int,
- raw_files: List[Tuple[str, str]] = None,
- ) -> Tuple[str, str]:
+ index: str | None,
+ prompt: str | None,
+ tokens: int | None,
+ raw_files: list[tuple[str, str]] | None,
+ ) -> list[tuple[str, str]]:
"""Processes OpenAI API LLM responses and returns generated text."""
try:
prompt = await token_handler(self.config, index, prompt, tokens)
+
parameters = await self._build_payload(prompt, tokens)
async with self._session.post(
- self.endpoint,
+ self.url,
headers=self.headers,
json=parameters,
) as response:
response.raise_for_status()
- response = await response.json()
- text = response["choices"][0]["message"]["content"]
- self._logger.info(f"Response for '{index}':\n{text}")
- return index, clean_response(index, text)
+ data = await response.json()
+ data = data["choices"][0]["message"]["content"]
+ self._logger.info(f"Generated text for '{index}': {data}")
+ return index, clean_response(index, data)
except (
aiohttp.ClientError,
@@ -106,6 +109,5 @@ async def _make_request(
aiohttp.ClientConnectorError,
openai.OpenAIError,
) as exc:
- message = f"Error making request for - `{index}`: {exc}"
- self._logger.error(message)
+ self._logger.error(f"Error making request for '{index}': {exc}")
return index, self.config.md.placeholder
diff --git a/readmeai/models/prompts.py b/readmeai/models/prompts.py
index a8468a32..0ff3d9ac 100644
--- a/readmeai/models/prompts.py
+++ b/readmeai/models/prompts.py
@@ -2,18 +2,17 @@
Methods for processing prompts used in LLM API requests.
"""
-from typing import Dict, List, Union
-
-import readmeai.config.settings as Settings
+from readmeai.config.settings import Settings
from readmeai.core.logger import Logger
_logger = Logger(__name__)
def get_prompt_context(prompts: dict, prompt_type: str, context: dict) -> str:
- """Generates a prompt for the LLM API."""
+ """
+ Generates a prompt for the LLM API.
+ """
prompt_template = get_prompt_template(prompts, prompt_type)
-
if not prompt_template:
_logger.error(f"Prompt type '{prompt_type}' not found.")
return ""
@@ -22,7 +21,9 @@ def get_prompt_context(prompts: dict, prompt_type: str, context: dict) -> str:
def get_prompt_template(prompts: dict, prompt_type: str) -> str:
- """Retrieves the template for the given prompt type."""
+ """
+ Retrieves the template for the given prompt type.
+ """
prompt_templates = {
"features": prompts["prompts"]["features"],
"overview": prompts["prompts"]["overview"],
@@ -32,7 +33,9 @@ def get_prompt_template(prompts: dict, prompt_type: str) -> str:
def inject_prompt_context(template: str, context: dict) -> str:
- """Formats the template with the provided context."""
+ """
+ Formats the template with the provided context.
+ """
try:
return template.format(*[context[key] for key in context])
except KeyError as exc:
@@ -42,10 +45,12 @@ def inject_prompt_context(template: str, context: dict) -> str:
async def set_additional_contexts(
config: Settings,
- dependencies: List[str],
- file_summaries: List[str],
-) -> List[dict]:
- """Generates additional prompts (features, overview, slogan) for LLM."""
+ dependencies: list[str],
+ file_summaries: list[tuple[str, str]],
+) -> list[dict]:
+ """
+ Generates additional prompts (features, overview, slogan) for LLM.
+ """
return [
{"type": prompt_type, "context": context}
for prompt_type, context in [
@@ -78,10 +83,12 @@ async def set_additional_contexts(
async def set_summary_context(
config: Settings,
- dependencies: List[str],
- file_summaries: List[str],
-) -> List[Dict[str, Union[str, dict]]]:
- """Generates the summary prompts to be used by the LLM API."""
+ dependencies: list[str],
+ file_summaries: list[str],
+) -> list[dict]:
+ """
+ Generates the summary prompts to be used by the LLM API.
+ """
return [
{"type": prompt_type, "context": context}
for prompt_type, context in [
diff --git a/readmeai/models/tokens.py b/readmeai/models/tokens.py
index 86446fc6..f66ea6e2 100644
--- a/readmeai/models/tokens.py
+++ b/readmeai/models/tokens.py
@@ -2,7 +2,7 @@
Tokenizer utilities for tokenizing and truncating text.
"""
-from tiktoken import get_encoding
+from tiktoken import Encoding, get_encoding
from readmeai.config.settings import Settings
from readmeai.core.logger import Logger
@@ -11,7 +11,7 @@
_logger = Logger(__name__)
-def _set_encoding_cache(encoding_name: str) -> str:
+def _set_encoding_cache(encoding_name: str) -> Encoding:
"""Set the encoding cache for a specific encoding."""
if encoding_name not in _encoding_cache:
_encoding_cache[encoding_name] = get_encoding(encoding_name)
@@ -19,7 +19,10 @@ def _set_encoding_cache(encoding_name: str) -> str:
async def token_handler(
- config: Settings, index: str, prompt: str, tokens: int
+ config: Settings,
+ index: str,
+ prompt: str,
+ tokens: int,
) -> str:
"""Handle token count for the prompt."""
encoder = config.llm.encoder
@@ -28,7 +31,7 @@ async def token_handler(
if token_count > max_count:
_logger.debug(
- f"Truncating '{index}' prompt: {token_count} > {max_count} tokens!"
+ f"Truncating '{index}' prompt: {token_count} > {max_count} tokens!",
)
prompt = truncate_tokens(encoder, prompt, tokens)
@@ -43,19 +46,19 @@ def count_tokens(text: str, encoder: str) -> int:
except (UnicodeEncodeError, ValueError) as exc:
_logger.error(
- f"Error counting tokens for '{text}' with {encoder}: {exc}"
+ f"Error counting tokens for '{text}' with {encoder}: {exc}",
)
token_count = 0
return token_count
-def truncate_tokens(encoder: str, text: str, max_count: int) -> str:
+def truncate_tokens(encoding: str, text: str, max_count: int) -> str:
"""Truncate a text string to a maximum number of tokens."""
if not text:
return text
try:
- encoder = _set_encoding_cache(encoder)
+ encoder = _set_encoding_cache(encoding)
token_count = len(encoder.encode(text))
if token_count <= max_count:
return text
@@ -70,7 +73,9 @@ def truncate_tokens(encoder: str, text: str, max_count: int) -> str:
def update_max_tokens(
- max_tokens: int, prompt: str, target: str = "Hello!"
+ max_tokens: int,
+ prompt: str,
+ target: str = "Hello!",
) -> int:
"""Adjust the maximum number of tokens based on the specific prompt."""
is_valid_prompt = prompt.strip().startswith(target.strip())
diff --git a/readmeai/parsers/cicd/bitbucket.py b/readmeai/parsers/cicd/bitbucket.py
deleted file mode 100644
index caf214b4..00000000
--- a/readmeai/parsers/cicd/bitbucket.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for Bitbucket Pipelines (bitbucket-pipelines.yml) configuration files."""
diff --git a/readmeai/parsers/cicd/circleci.py b/readmeai/parsers/cicd/circleci.py
deleted file mode 100644
index 39d49b91..00000000
--- a/readmeai/parsers/cicd/circleci.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for .circleci/config.yml configuration files."""
diff --git a/readmeai/parsers/cicd/github.py b/readmeai/parsers/cicd/github.py
deleted file mode 100644
index 6c46dc00..00000000
--- a/readmeai/parsers/cicd/github.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for GitHub Actions (.github/workflows/) configuration files."""
diff --git a/readmeai/parsers/cicd/gitlab.py b/readmeai/parsers/cicd/gitlab.py
deleted file mode 100644
index 2bdcbb22..00000000
--- a/readmeai/parsers/cicd/gitlab.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for .gitlab-ci.yml configuration files."""
diff --git a/readmeai/parsers/cicd/jenkins.py b/readmeai/parsers/cicd/jenkins.py
deleted file mode 100644
index 97912d5b..00000000
--- a/readmeai/parsers/cicd/jenkins.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for Jenkinsfile (Jenkinsfile) configuration files."""
diff --git a/readmeai/parsers/cicd/travis.py b/readmeai/parsers/cicd/travis.py
deleted file mode 100644
index 1d4cbc55..00000000
--- a/readmeai/parsers/cicd/travis.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for .travis.yml configuration files."""
diff --git a/readmeai/parsers/configuration/ansible.py b/readmeai/parsers/configuration/ansible.py
deleted file mode 100644
index bf1ce6a6..00000000
--- a/readmeai/parsers/configuration/ansible.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for Ansible (playbook.yml, ansible/site.yml) configuration files."""
diff --git a/readmeai/parsers/configuration/apache.py b/readmeai/parsers/configuration/apache.py
deleted file mode 100644
index e9fb3a3a..00000000
--- a/readmeai/parsers/configuration/apache.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for apache.py (httpd.conf) configuration files."""
diff --git a/readmeai/parsers/configuration/nginx.py b/readmeai/parsers/configuration/nginx.py
deleted file mode 100644
index 815324bf..00000000
--- a/readmeai/parsers/configuration/nginx.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for Nginx (nginx.conf) configuration files."""
diff --git a/readmeai/parsers/language/cpp.py b/readmeai/parsers/cpp.py
similarity index 81%
rename from readmeai/parsers/language/cpp.py
rename to readmeai/parsers/cpp.py
index ee0000fc..8c68d574 100644
--- a/readmeai/parsers/language/cpp.py
+++ b/readmeai/parsers/cpp.py
@@ -1,19 +1,22 @@
-"""Dependency file parsers for C/C++ projects."""
+"""
+Dependency file parsers for C/C++ projects.
+"""
import re
-from typing import List
from readmeai.core.parsers import BaseFileParser
class CMakeParser(BaseFileParser):
- """Parser for CMake dependency files."""
+ """
+ Parser for CMake dependency files.
+ """
def __init__(self) -> None:
"""Initializes the handler with given configuration."""
super().__init__()
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extracts dependencies, libs, and software from a CMakeLists.txt."""
try:
extracted_dependencies = []
@@ -30,35 +33,40 @@ def parse(self, content: str) -> List[str]:
extracted_dependencies.extend(dependencies)
if line.startswith("target_link_libraries") or line.startswith(
- "find_package"
+ "find_package",
):
libs = re.findall(
- r"target_link_libraries\([^)]+\s+([^)]+)\)", line
+ r"target_link_libraries\([^)]+\s+([^)]+)\)",
+ line,
)
extracted_dependencies.extend(libs)
return list(set(extracted_dependencies))
except re.error as exc:
- return self.handle_parsing_error(f"CMakeLists.txt: {str(exc)}")
+ return self.handle_parsing_error(f"CMakeLists.txt: {exc!s}")
class ConfigureAcParser(BaseFileParser):
- """Parser for configure.ac dependency files."""
+ """
+ Parser for configure.ac dependency files.
+ """
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extracts package names from a configure.ac file."""
try:
regex = re.compile(r"AC_CHECK_LIB\([^)]+\s+([^)]+)\)")
return regex.findall(content)
except re.error as exc:
- return self.handle_parsing_error(f"configure.ac: {str(exc)}")
+ return self.handle_parsing_error(f"configure.ac: {exc!s}")
class MakefileAmParser(BaseFileParser):
- """Parser for Makefile dependency files."""
+ """
+ Parser for Makefile dependency files.
+ """
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extracts dependencies from Makefile.am files."""
try:
extracted_packages = []
@@ -89,4 +97,4 @@ def parse(self, content: str) -> List[str]:
return list(set(extracted_packages))
except re.error as exc:
- return self.handle_parsing_error(f"Makefile.am: {str(exc)}")
+ return self.handle_parsing_error(f"Makefile.am: {exc!s}")
diff --git a/readmeai/parsers/configuration/docker.py b/readmeai/parsers/docker.py
similarity index 74%
rename from readmeai/parsers/configuration/docker.py
rename to readmeai/parsers/docker.py
index 9b31b7de..c7e99b65 100644
--- a/readmeai/parsers/configuration/docker.py
+++ b/readmeai/parsers/docker.py
@@ -1,7 +1,8 @@
-"""Parser for Docker (Dockerfile, docker-compose.yaml) configuration files."""
+"""
+Parser for Docker (Dockerfile, docker-compose.yaml) configuration files.
+"""
import re
-from typing import List
import yaml
@@ -9,13 +10,15 @@
class DockerfileParser(BaseFileParser):
- """Parser for Dockerfile dependency files."""
+ """
+ Parser for Dockerfile dependency files.
+ """
def __init__(self) -> None:
"""Initializes the handler with given configuration."""
super().__init__()
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extracts package names from a Dockerfile."""
try:
dependencies = []
@@ -35,13 +38,15 @@ def parse(self, content: str) -> List[str]:
return dependencies
except re.error as exc:
- return self.handle_parsing_error(f"Dockerfile: {str(exc)}")
+ return self.handle_parsing_error(f"Dockerfile: {exc!s}")
class DockerComposeParser(BaseFileParser):
- """Parser for Docker related files."""
+ """
+ Parser for Docker related files.
+ """
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Parse docker-compose.yaml file and return a list of services."""
try:
data = yaml.safe_load(content)
@@ -49,4 +54,4 @@ def parse(self, content: str) -> List[str]:
return list(data["services"].keys())
except yaml.YAMLError as exc:
- return self.handle_parsing_error(f"Dockerfile: {str(exc)}")
+ return self.handle_parsing_error(f"Dockerfile: {exc!s}")
diff --git a/readmeai/parsers/factory.py b/readmeai/parsers/factory.py
index 5af521b2..51e03289 100644
--- a/readmeai/parsers/factory.py
+++ b/readmeai/parsers/factory.py
@@ -1,78 +1,75 @@
-"""Abstract factory module for all project file parsers."""
-
-from typing import Dict, Type
+"""
+Abstract factory module for all project file parsers.
+"""
from readmeai.core.parsers import BaseFileParser
-from readmeai.parsers.configuration.docker import (
- DockerComposeParser,
- DockerfileParser,
-)
-from readmeai.parsers.configuration.properties import PropertiesParser
-from readmeai.parsers.language.cpp import (
+from readmeai.parsers.cpp import (
CMakeParser,
ConfigureAcParser,
MakefileAmParser,
)
-from readmeai.parsers.language.go import GoModParser
-from readmeai.parsers.language.python import (
- RequirementsParser,
- TomlParser,
- YamlParser,
+from readmeai.parsers.docker import (
+ DockerComposeParser,
+ DockerfileParser,
)
-from readmeai.parsers.language.rust import CargoTomlParser
-from readmeai.parsers.language.swift import SwiftPackageParser
-from readmeai.parsers.package.gradle import (
+from readmeai.parsers.go import GoModParser
+from readmeai.parsers.gradle import (
BuildGradleKtsParser,
BuildGradleParser,
)
-from readmeai.parsers.package.maven import MavenParser
-from readmeai.parsers.package.npm import PackageJsonParser
-from readmeai.parsers.package.yarn import YarnLockParser
-
-ParserRegistryType = dict[str, Type[BaseFileParser]]
+from readmeai.parsers.maven import MavenParser
+from readmeai.parsers.npm import PackageJsonParser
+from readmeai.parsers.properties import PropertiesParser
+from readmeai.parsers.python import (
+ RequirementsParser,
+ TomlParser,
+ YamlParser,
+)
+from readmeai.parsers.rust import CargoTomlParser
+from readmeai.parsers.swift import SwiftPackageParser
+from readmeai.parsers.yarn import YarnLockParser
-PARSER_REGISTRY = {
- # Configuration
- ".properties": PropertiesParser,
- # Language/Framework
- # Python
- "Pipfile": TomlParser(),
- "pyproject.toml": TomlParser(),
- "requirements.in": RequirementsParser(),
- "requirements.txt": RequirementsParser(),
- "requirements-dev.txt": RequirementsParser(),
- "requirements-test.txt": RequirementsParser(),
- "requirements-prod.txt": RequirementsParser(),
- "dev-requirements.txt": RequirementsParser(),
- "environment.yml": YamlParser(),
- "environment.yaml": YamlParser(),
- # "setup.py": setup_py_parser,
- # "setup.cfg": setup_cfg_parser,
- # C/C++
- "cmakeLists.txt": CMakeParser(),
- "configure.ac": ConfigureAcParser(),
- "Makefile.am": MakefileAmParser(),
- # JavaScript/Node.js
- "package.json": PackageJsonParser(),
- "yarn.lock": YarnLockParser(),
- # Kotlin and Kotlin DSL
- "build.gradle": BuildGradleParser(),
- "build.gradle.kts": BuildGradleKtsParser(),
- # Go
- "go.mod": GoModParser(),
- # Java
- "pom.xml": MavenParser(),
- # Rust
- "cargo.toml": CargoTomlParser(),
- # Swift
- "Package.swift": SwiftPackageParser(),
- "Dockerfile": DockerfileParser(),
- "docker-compose.yaml": DockerComposeParser(),
- # Package Managers
- # Monitoring and Logging
-}
+ParserRegistryType = dict[str, BaseFileParser]
-def parser_handler() -> Dict[str, BaseFileParser]:
- """Returns a dictionary of callable file parser methods."""
- return PARSER_REGISTRY
+def parser_handler() -> ParserRegistryType:
+ """
+ Returns a dictionary of callable file parser methods.
+ """
+ return {
+ # Python
+ "Pipfile": TomlParser(),
+ "pyproject.toml": TomlParser(),
+ "requirements.in": RequirementsParser(),
+ "requirements.txt": RequirementsParser(),
+ "requirements-dev.txt": RequirementsParser(),
+ "requirements-test.txt": RequirementsParser(),
+ "requirements-prod.txt": RequirementsParser(),
+ "dev-requirements.txt": RequirementsParser(),
+ "environment.yml": YamlParser(),
+ "environment.yaml": YamlParser(),
+ # "setup.py": setup_py_parser,
+ # "setup.cfg": setup_cfg_parser,
+ # C/C++
+ "cmakeLists.txt": CMakeParser(),
+ "configure.ac": ConfigureAcParser(),
+ "Makefile.am": MakefileAmParser(),
+ # JavaScript/Node.js
+ "package.json": PackageJsonParser(),
+ "yarn.lock": YarnLockParser(),
+ # Kotlin/Kotlin DSL
+ "build.gradle": BuildGradleParser(),
+ "build.gradle.kts": BuildGradleKtsParser(),
+ # Go
+ "go.mod": GoModParser(),
+ # Java
+ "pom.xml": MavenParser(),
+ # Rust
+ "cargo.toml": CargoTomlParser(),
+ # Swift
+ "Package.swift": SwiftPackageParser(),
+ # Docker
+ "Dockerfile": DockerfileParser(),
+ "docker-compose.yaml": DockerComposeParser(),
+ ".properties": PropertiesParser(),
+ }
diff --git a/readmeai/parsers/language/go.py b/readmeai/parsers/go.py
similarity index 75%
rename from readmeai/parsers/language/go.py
rename to readmeai/parsers/go.py
index 715d6815..656f353a 100644
--- a/readmeai/parsers/language/go.py
+++ b/readmeai/parsers/go.py
@@ -1,19 +1,22 @@
-"""Parse package dependencies from go.mod files."""
+"""
+Parser for go.mod dependency files.
+"""
import re
-from typing import List
from readmeai.core.parsers import BaseFileParser
class GoModParser(BaseFileParser):
- """Parser for go.mod files."""
+ """
+ Parser for go.mod files.
+ """
def __init__(self) -> None:
"""Initializes the handler with given configuration."""
super().__init__()
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Parse the content of a go.mod file."""
try:
lines = content.split("\n")
@@ -27,4 +30,4 @@ def parse(self, content: str) -> List[str]:
return list(package_names)
except Exception as exc:
- return self.handle_parsing_error(f"go.mod: {str(exc)}")
+ return self.handle_parsing_error(f"go.mod: {exc!s}")
diff --git a/readmeai/parsers/package/gradle.py b/readmeai/parsers/gradle.py
similarity index 81%
rename from readmeai/parsers/package/gradle.py
rename to readmeai/parsers/gradle.py
index 613a5a9b..da27d28a 100644
--- a/readmeai/parsers/package/gradle.py
+++ b/readmeai/parsers/gradle.py
@@ -1,19 +1,22 @@
-"""Parser for gradle dependency files."""
+"""
+Parser for gradle dependency files.
+"""
import re
-from typing import List
from readmeai.core.parsers import BaseFileParser
class BuildGradleParser(BaseFileParser):
- """Parser for build.gradle dependency files."""
+ """
+ Parser for build.gradle dependency files.
+ """
def __init__(self) -> None:
"""Initializes the handler with given configuration."""
super().__init__()
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extracts package names from a build.gradle file."""
try:
pattern = r"(implementation|classpath|api|testImplementation|androidTestImplementation|kapt)\s+['\"]([^'\"]+)['\"]"
@@ -30,13 +33,15 @@ def parse(self, content: str) -> List[str]:
return list(package_names)
except re.error as exc:
- return self.handle_parsing_error(f"build.gradle: {str(exc)}")
+ return self.handle_parsing_error(f"build.gradle: {exc!s}")
class BuildGradleKtsParser(BaseFileParser):
- """Parser for build.gradle.kts dependency files."""
+ """
+ Parser for build.gradle.kts dependency files.
+ """
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extracts package names from a build.gradle.kts file."""
try:
pattern = r"(\bimplementation|testImplementation)\s*\((['\"])([^'\"]+)\2\)"
@@ -52,4 +57,4 @@ def parse(self, content: str) -> List[str]:
return list(package_names)
except re.error as error:
- return self.handle_parsing_error(f"build.gradle.kts: {str(error)}")
+ return self.handle_parsing_error(f"build.gradle.kts: {error!s}")
diff --git a/readmeai/parsers/infrastructure/cloudformation.py b/readmeai/parsers/infrastructure/cloudformation.py
deleted file mode 100644
index 063fd2c1..00000000
--- a/readmeai/parsers/infrastructure/cloudformation.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for cloudformation.yaml (AWS CloudFormation) configuration files."""
diff --git a/readmeai/parsers/infrastructure/terraform.py b/readmeai/parsers/infrastructure/terraform.py
deleted file mode 100644
index 6e873666..00000000
--- a/readmeai/parsers/infrastructure/terraform.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Parser for main.tf (Terraform) configuration files."""
diff --git a/readmeai/parsers/package/maven.py b/readmeai/parsers/maven.py
similarity index 70%
rename from readmeai/parsers/package/maven.py
rename to readmeai/parsers/maven.py
index 316e16a0..c3f63c3a 100644
--- a/readmeai/parsers/package/maven.py
+++ b/readmeai/parsers/maven.py
@@ -1,23 +1,26 @@
-"""Parser utilities for Java-based dependency files."""
+"""
+Parser utilities for Java-based dependency files.
+"""
import re
-from typing import List
from readmeai.core.parsers import BaseFileParser
class MavenParser(BaseFileParser):
- """Parser for Maven dependency files in pom.xml format."""
+ """
+ Parser for Maven dependency files in pom.xml format.
+ """
def __init__(self) -> None:
"""Initializes the handler with given configuration."""
super().__init__()
- def parse(self, content: str) -> List[str]:
+ def parse(self, content: str) -> list[str]:
"""Extract packages names from Maven pom.xml files."""
try:
regex = re.compile(
- r"{repo_name}
+
+ {slogan} +
+\n\t{shields_icons}
+\n\t{badge_icons}
+ +\n\t{shields_icons}
+ +\n\t{shields_icons}
+\n\t{badge_icons}
+ +โบ INSERT-TEXT-HERE
"
- assert mock_post.call_count == 1
+
+ @patch("readmeai.models.openai.aiohttp.ClientSession.post")
+ async def run_test(error, mock_post):
+ mock_post.side_effect = error
+ openai_handler._session = MagicMock(spec=aiohttp.ClientSession)
+ openai_handler._session.post = mock_post
+
+ index, result = await openai_handler._make_request(
+ "test_index",
+ "test_prompt",
+ 100,
+ None,
+ )
+
+ assert index == "test_index"
+ assert result == mock_config.md.placeholder
+ assert mock_post.call_count == 1
+
+ await run_test(aiohttp.ClientError())
+ await run_test(aiohttp.ClientConnectionError())
+ await run_test(openai.OpenAIError())
diff --git a/tests/models/test_prompts.py b/tests/models/test_prompts.py
index 40476edc..b9a5309e 100644
--- a/tests/models/test_prompts.py
+++ b/tests/models/test_prompts.py
@@ -17,15 +17,20 @@
def test_get_prompt_context_found(mock_config, mock_configs):
"""Test the retrieval of a prompt context."""
- with patch(
- "readmeai.models.prompts.get_prompt_template",
- return_value="Hello, {name}!",
- ), patch(
- "readmeai.models.prompts.inject_prompt_context",
- return_value="Hello, World!",
+ with (
+ patch(
+ "readmeai.models.prompts.get_prompt_template",
+ return_value="Hello, {name}!",
+ ),
+ patch(
+ "readmeai.models.prompts.inject_prompt_context",
+ return_value="Hello, World!",
+ ),
):
result = get_prompt_context(
- mock_configs.prompts, "greeting", {"name": "World"}
+ mock_configs.prompts,
+ "greeting",
+ {"name": "World"},
)
assert result == "Hello, World!"
@@ -43,7 +48,10 @@ def test_get_prompt_template(mock_config, mock_configs):
def test_inject_prompt_context_success(
- mock_config, mock_configs, mock_dependencies, mock_summaries
+ mock_config,
+ mock_configs,
+ mock_dependencies,
+ mock_summaries,
):
"""Test the injection of a prompt context."""
context = get_prompt_context(
diff --git a/tests/parsers/cicd/__init__.py b/tests/parsers/cicd/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/cicd/test_bitbucket.py b/tests/parsers/cicd/test_bitbucket.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/cicd/test_circleci.py b/tests/parsers/cicd/test_circleci.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/cicd/test_github.py b/tests/parsers/cicd/test_github.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/cicd/test_gitlab.py b/tests/parsers/cicd/test_gitlab.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/cicd/test_jenkins.py b/tests/parsers/cicd/test_jenkins.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/cicd/test_travis.py b/tests/parsers/cicd/test_travis.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/configuration/__init__.py b/tests/parsers/configuration/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/configuration/test_ansible.py b/tests/parsers/configuration/test_ansible.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/configuration/test_apache.py b/tests/parsers/configuration/test_apache.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/configuration/test_nginx.py b/tests/parsers/configuration/test_nginx.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/infrastructure/__init__.py b/tests/parsers/infrastructure/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/infrastructure/test_cloudformation.py b/tests/parsers/infrastructure/test_cloudformation.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/infrastructure/test_terraform.py b/tests/parsers/infrastructure/test_terraform.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/language/__init__.py b/tests/parsers/language/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/orchestration/__init__.py b/tests/parsers/orchestration/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/orchestration/test_kubernetes.py b/tests/parsers/orchestration/test_kubernetes.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/package/__init__.py b/tests/parsers/package/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/package/test_composer.py b/tests/parsers/package/test_composer.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/package/test_gem.py b/tests/parsers/package/test_gem.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/package/test_nuget.py b/tests/parsers/package/test_nuget.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/package/test_pip.py b/tests/parsers/package/test_pip.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/parsers/package/test_yarn.py b/tests/parsers/package/test_yarn.py
deleted file mode 100644
index e29a0b01..00000000
--- a/tests/parsers/package/test_yarn.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Tests for the yarn.lock file parser module."""
diff --git a/tests/parsers/language/test_cpp.py b/tests/parsers/test_cpp.py
similarity index 98%
rename from tests/parsers/language/test_cpp.py
rename to tests/parsers/test_cpp.py
index 2ec57839..f244ca3e 100644
--- a/tests/parsers/language/test_cpp.py
+++ b/tests/parsers/test_cpp.py
@@ -2,7 +2,7 @@
import pytest
-from readmeai.parsers.language.cpp import (
+from readmeai.parsers.cpp import (
CMakeParser,
ConfigureAcParser,
MakefileAmParser,
@@ -191,7 +191,7 @@ def test_cmake_parser_invalid(cmake_parser):
@pytest.mark.skip
def test_configureac_parser_valid(configureac_parser, content_configureac):
extracted_packages = configureac_parser.parse(
- content_configureac.read_text()
+ content_configureac.read_text(),
)
expected_packages = ["mp", "clock_gettime", "rt", "dl", "pthread"]
assert sorted(extracted_packages) == sorted(expected_packages)
@@ -204,7 +204,7 @@ def test_configureac_parser_invalid(configureac_parser):
def test_makefile_am_parser_valid(makefile_am_parser, content_makefileam):
extracted_packages = makefile_am_parser.parse(
- content_makefileam.read_text()
+ content_makefileam.read_text(),
)
# expected_packages = ["my_program", "libfoo", "check"]
assert "my_program" in extracted_packages
diff --git a/tests/parsers/configuration/test_docker.py b/tests/parsers/test_docker.py
similarity index 98%
rename from tests/parsers/configuration/test_docker.py
rename to tests/parsers/test_docker.py
index 698f208f..cf6dc21c 100644
--- a/tests/parsers/configuration/test_docker.py
+++ b/tests/parsers/test_docker.py
@@ -2,7 +2,7 @@
import pytest
-from readmeai.parsers.configuration.docker import (
+from readmeai.parsers.docker import (
DockerComposeParser,
DockerfileParser,
)
diff --git a/tests/parsers/test_factory.py b/tests/parsers/test_factory.py
index 2130d4f6..4288da63 100644
--- a/tests/parsers/test_factory.py
+++ b/tests/parsers/test_factory.py
@@ -1,7 +1,7 @@
"""Test cases for the file_parser module."""
from readmeai.parsers.factory import parser_handler
-from readmeai.parsers.language.python import RequirementsParser, TomlParser
+from readmeai.parsers.python import RequirementsParser, TomlParser
def test_parser_handler():
diff --git a/tests/parsers/language/test_go.py b/tests/parsers/test_go.py
similarity index 90%
rename from tests/parsers/language/test_go.py
rename to tests/parsers/test_go.py
index 9752ee45..3055fa7c 100644
--- a/tests/parsers/language/test_go.py
+++ b/tests/parsers/test_go.py
@@ -1,6 +1,6 @@
"""Unit tests for Go-based dependency parsers."""
-from readmeai.parsers.language.go import GoModParser
+from readmeai.parsers.go import GoModParser
content = """
module geekdemo
diff --git a/tests/parsers/package/test_gradle.py b/tests/parsers/test_gradle.py
similarity index 97%
rename from tests/parsers/package/test_gradle.py
rename to tests/parsers/test_gradle.py
index df74a57c..efb7ac5a 100644
--- a/tests/parsers/package/test_gradle.py
+++ b/tests/parsers/test_gradle.py
@@ -1,6 +1,6 @@
"""Unit tests for parsing build.gradle files."""
-from readmeai.parsers.package.gradle import (
+from readmeai.parsers.gradle import (
BuildGradleKtsParser,
BuildGradleParser,
)
@@ -114,7 +114,7 @@ def test_build_gradle():
"gradle",
"jfrog",
"tools",
- ]
+ ],
)
@@ -131,5 +131,5 @@ def test_build_gradle_kts():
"ext",
"androidx",
"benchmark",
- ]
+ ],
)
diff --git a/tests/parsers/package/test_maven.py b/tests/parsers/test_maven.py
similarity index 98%
rename from tests/parsers/package/test_maven.py
rename to tests/parsers/test_maven.py
index d2cee9f9..9d29b4aa 100644
--- a/tests/parsers/package/test_maven.py
+++ b/tests/parsers/test_maven.py
@@ -3,7 +3,7 @@
import re
from unittest.mock import patch
-from readmeai.parsers.package.maven import MavenParser
+from readmeai.parsers.maven import MavenParser
content = """