Skip to content

Commit

Permalink
feat: impose strict linter rules via ruff (#8)
Browse files Browse the repository at this point in the history
* feat: impose strict linter rules via ruff

* chore: add CI

* fix: ruff check --fix

* format: ruff format .

* chore: remove useless files

* chore: follow the ruff rules

* Updated backend-ci.yml to include pre-commit checks for flake8, mypy, pydantic, types-requests, and types-redis.

* chore: adjust steps order in backend-ci.yml

* chore: add config path

* chore: updated pre-commit action to include all files and specified configuration file.

* chore: updated pre-commit action to remove --all-files flag in backend-ci.yml.

* chore: updated pre-commit action to include all files in the backend directory for linting.

* chore: updated pre-commit configuration to only run on files within the backend directory.

* chore: updated backend-ci.yml to include installation of ruff package for linting and formatting.

* chore: lint config for backend

* chore: update pre-commit configuration to use pyproject.toml instead of setup.cfg

* chore: refactor pre-commit config to use pyproject.toml instead of setup.cfg in backend directory.

* chore: updated pre-commit job in backend-ci.yml to remove unnecessary configuration for working directory and specify files directly for pre-commit checks.

* chore: updated mypy version to 1.10.0 and added new error codes to disable in pyproject.toml

---------

Co-authored-by: Thomas <wxy_000000@qq.com>
Co-authored-by: BirdRing <zhkkun@gmail.com>
  • Loading branch information
3 people authored Apr 24, 2024
1 parent ba577f0 commit 46f9fde
Show file tree
Hide file tree
Showing 41 changed files with 1,240 additions and 1,569 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/backend-ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Backend code-quality workflow: runs pre-commit (flake8, mypy, ruff) against
# the backend/ directory on every pull request and on pushes to main/prod.
name: backend code quality

on:
  pull_request:
  push:
    branches:
      - 'main'
      - 'prod'

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      # Tools invoked by the repository's local pre-commit hooks
      # (mypy needs the type-stub packages and pydantic plugin available).
      - run: pip install flake8 mypy pydantic types-requests types-redis ruff
      - uses: pre-commit/action@v3.0.0
        with:
          # Limit the hooks to files under backend/ only.
          extra_args: --files ./backend/*
54 changes: 54 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# Pre-commit configuration: generic hygiene hooks plus locally-installed
# mypy / ruff. Every hook is scoped to the backend/ directory via `files`.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-ast
        files: ^backend/
      - id: check-case-conflict
        files: ^backend/
      - id: check-docstring-first
        files: ^backend/
      - id: check-executables-have-shebangs
        files: ^backend/
      - id: check-json
        files: ^backend/
      - id: check-added-large-files
        files: ^backend/
      - id: pretty-format-json
        args:
          - "--autofix"
          - "--indent=4"
        files: ^backend/
      - id: detect-private-key
        files: ^backend/
      - id: debug-statements
        files: ^backend/
      - id: end-of-file-fixer
        files: ^backend/
      - id: trailing-whitespace
        files: ^backend/
  # Local hooks: these run the mypy/ruff binaries installed in the
  # environment (see .github/workflows/backend-ci.yml) rather than
  # pre-commit-managed virtualenvs.
  - repo: local
    hooks:
      - id: mypy
        name: mypy
        # Runs over the whole tree (pass_filenames: false) using the
        # backend's pyproject.toml for configuration.
        entry: mypy .
        require_serial: true
        language: system
        types: [ python ]
        files: ^backend/
        pass_filenames: false
        args: [--config-file=backend/pyproject.toml]
      - id: ruff-lint
        name: ruff-lint
        entry: ruff check --fix
        require_serial: true
        language: system
        files: ^backend/
        types: [ python ]
      - id: ruff-format
        name: ruff-format
        entry: ruff format
        require_serial: true
        language: system
        files: ^backend/
        types: [ python ]
2 changes: 1 addition & 1 deletion backend/.env.example
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Usage: Copy this file to .env and fill in the values
# Model name, required for inference.
# Model name, required for inference.
# For OpenAI GPT, use "gpt-4-1106-preview" or "gpt-3.5-turbo-1106" as model name.
# For local LLM, Those models with Ollama inference are tested and recommended: "solar:10.7b", "codellama:13b", "llava:13b", "deepseek-coder:33b". Other models are not tested and may not work as expected.
MODEL_NAME=llava:13b
Expand Down
71 changes: 0 additions & 71 deletions backend/.pre-commit-config.yaml

This file was deleted.

3 changes: 1 addition & 2 deletions backend/openagent/agent/cache.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,2 @@
def init_cache():
# langchain.llm_cache = InMemoryCache()
def init_cache() -> None:
pass
16 changes: 4 additions & 12 deletions backend/openagent/agent/function_agent.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from langchain.agents import AgentExecutor, initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI, ChatOllama
from langchain.memory import ConversationBufferMemory, ChatMessageHistory
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.chat_models import ChatOllama, ChatOpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage
from toolz import memoize
Expand All @@ -11,15 +11,11 @@
from openagent.conf.env import settings
from openagent.experts.account_expert import AccountExpert
from openagent.experts.collection_expert import CollectionExpert
from openagent.experts.dapp_expert import DappExpert
from openagent.experts.executor_expert import ExecutorExpert
from openagent.experts.feed_expert import FeedExpert
from openagent.experts.google_expert import GoogleExpert
from openagent.experts.network_expert import NetworkExpert
from openagent.experts.hoot_expert import HootExpert
from openagent.experts.swap_expert import SwapExpert
from openagent.experts.token_expert import TokenExpert
from openagent.experts.transfer_expert import TransferExpert
from openagent.experts.hoot_expert import HootExpert

init_cache()

Expand All @@ -39,15 +35,11 @@ def get_agent(session_id: str) -> AgentExecutor:
# load Experts as tools for the agent
experts = [
GoogleExpert(),
NetworkExpert(),
FeedExpert(),
CollectionExpert(),
TokenExpert(),
DappExpert(),
AccountExpert(),
SwapExpert(),
TransferExpert(),
ExecutorExpert(),
HootExpert(),
]

Expand Down
5 changes: 1 addition & 4 deletions backend/openagent/agent/postgres_history.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,11 @@
import json
import logging
from typing import List

from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, messages_from_dict
from toolz.curried import compose_left, map, filter
from langchain.schema.messages import BaseMessage

from openagent.db.database import DBSession
from openagent.db.models import ChatHistory

logger = logging.getLogger(__name__)
Expand Down
5 changes: 2 additions & 3 deletions backend/openagent/agent/session_title.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI, ChatOllama
from langchain.chat_models import ChatOllama, ChatOpenAI
from langchain.prompts import PromptTemplate
from loguru import logger

from openagent.conf.env import settings
from openagent.db.database import DBSession
from openagent.db.models import ChatSession
from openagent.conf.env import settings

load_dotenv()

Expand Down Expand Up @@ -48,7 +48,6 @@ async def agen_session_title(user_id: str, session_id: str, history: str) -> lis


if __name__ == "__main__":

import asyncio

asyncio.run(agen_session_title("123", "456", "what's your name ?"))
17 changes: 13 additions & 4 deletions backend/openagent/agent/stream_callback.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
import asyncio
import json
import uuid
from typing import Any, AsyncIterator, Dict, List, Literal, Union, cast, Optional
from typing import Any, AsyncIterator, Dict, List, Literal, Optional, Union, cast
from uuid import UUID

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import AgentFinish, _message_to_dict, BaseMessage, LLMResult
from langchain.schema import AgentFinish, BaseMessage, LLMResult, _message_to_dict

from openagent.agent.ctx_var import resp_msg_id, chat_req_ctx
from openagent.agent.ctx_var import chat_req_ctx, resp_msg_id
from openagent.db.database import DBSession
from openagent.db.models import ChatHistory
from openagent.dto.cb_content import CbContent, CbContentType
Expand Down Expand Up @@ -225,9 +225,18 @@ async def on_agent_finish(
self.done.set()

async def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
self.done.set()
return await super().on_llm_error(
error, run_id=run_id, parent_run_id=parent_run_id, tags=tags, **kwargs
)

# TODO implement the other methods

Expand Down
6 changes: 3 additions & 3 deletions backend/openagent/agent/suggested_question.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI, ChatOllama
from langchain.chat_models import ChatOllama, ChatOpenAI
from langchain.prompts import PromptTemplate
from loguru import logger

Expand All @@ -14,7 +14,7 @@
async def agen_suggested_questions(user_id: str, history: str) -> list[str]:
prompt = PromptTemplate(
template="""
Suggest follow up questions based on the user chat history.
Suggest follow up questions based on the user chat history.
Return Format:
["question1", "question2", "question3"]
Expand All @@ -34,7 +34,7 @@ async def agen_suggested_questions(user_id: str, history: str) -> list[str]:
-----------------------------------------------------------------
Q:
{history}
A:""",
A:""", # noqa
input_variables=["history"],
)
if settings.MODEL_NAME.startswith("gpt"):
Expand Down
6 changes: 3 additions & 3 deletions backend/openagent/agent/system_prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
When use transfer or swap tool, you should ask the user to edit or confirm the transaction, \
and don't show the transaction link to the user.
Return format
Return format:
You are committed to providing responses in markdown format for enhanced readability.
"""

Expand All @@ -27,7 +27,7 @@
Here are tools' schemas:
""",
"format_instructions": """
"format_instructions": r"""
When responding, you must exclusively use one of the following two formats:
Expand All @@ -51,7 +51,7 @@
}}}}
```
"action\_input" is illegal, never escape it with a backslash.
"action\_input" is illegal, never escape it with a backslash.
""",
"suffix": """
REMEMBER to respond with a markdown code snippet of a json \
Expand Down
26 changes: 1 addition & 25 deletions backend/openagent/app.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,13 @@
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import HTMLResponse
from starlette.staticfiles import StaticFiles

from openagent.router.chat import chat_router
from openagent.router.onboarding import onboarding_router
from openagent.router.session import session_router
from openagent.router.task import task_router
from openagent.service.task import check_task_status

load_dotenv()
app = FastAPI(
title="OpenAgent",
description="""
### Task notification websocket API
- **URL**: `/tasks/notifications/{user_id}`
- **Websocket call example**: `/tasks/ws-test`
""",
)
app = FastAPI(title="OpenAgent", description="")

app.add_middleware(
CORSMiddleware,
Expand All @@ -31,16 +20,3 @@
app.include_router(onboarding_router)
app.include_router(chat_router)
app.include_router(session_router)
app.include_router(task_router)

app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get("/tasks/ws-test", response_class=HTMLResponse, include_in_schema=False)
async def get_websocket_test_page():
return HTMLResponse(content=open("./static/websocket_test.html", "r").read())


@app.on_event("startup")
async def startup_event():
check_task_status()
2 changes: 1 addition & 1 deletion backend/openagent/db/models.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from sqlalchemy import Column, Integer, String, Text, DateTime, Boolean
from sqlalchemy import Boolean, Column, DateTime, Integer, String, Text
from sqlalchemy.orm import declarative_base
from sqlalchemy.sql import func

Expand Down
Loading

0 comments on commit 46f9fde

Please sign in to comment.