Skip to content

Commit

Permalink
Merge pull request #34 from gomate-community/pipeline
Browse files Browse the repository at this point in the history
feature@rag: baseline & citation
  • Loading branch information
yanqiangmiffy authored Jun 25, 2024
2 parents 413f67b + 4701c3a commit 5468e4f
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 5 deletions.
4 changes: 2 additions & 2 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
# 修改成自己的配置!!!
app_config = ApplicationConfig()
app_config.docs_path = "./docs/"
app_config.llm_model_path = "/data/users/searchgpt/pretrained_models/chatglm3-6b/"
app_config.llm_model_path = "/data/users/searchgpt/pretrained_models/glm-4-9b-chat"

retriever_config = DenseRetrieverConfig(
model_name_or_path="/data/users/searchgpt/pretrained_models/bge-large-zh-v1.5",
Expand Down Expand Up @@ -174,7 +174,7 @@ def predict(input,
# )
with gr.Column(scale=4):
with gr.Row():
chatbot = gr.Chatbot(label='Gomate Application').style(height=400)
chatbot = gr.Chatbot(label='Gomate Application').style(height=650)
with gr.Row():
message = gr.Textbox(label='请输入问题')
with gr.Row():
Expand Down
12 changes: 9 additions & 3 deletions gomate/applications/rag.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from gomate.modules.generator.llm import GLMChat
from gomate.modules.reranker.bge_reranker import BgeReranker
from gomate.modules.retrieval.dense_retriever import DenseRetriever

from gomate.modules.citation.match_citation import MatchCitation

class ApplicationConfig():
def __init__(self):
Expand All @@ -28,7 +28,7 @@ def __init__(self, config):
self.retriever = DenseRetriever(self.config.retriever_config)
self.reranker = BgeReranker(self.config.rerank_config)
self.llm = GLMChat(self.config.llm_model_path)

self.mc=MatchCitation()
def init_vector_store(self):
"""
Expand Down Expand Up @@ -57,4 +57,10 @@ def chat(self, question: str = '', top_k: int = 5):
content = '\n'.join([content['text'] for content in contents])
print(contents)
response, history = self.llm.chat(question, [], content)
return response, history, contents
result = self.mc.ground_response(
response=response,
evidences=[content['text'] for content in contents],
selected_idx=[idx for idx in range(len(contents))],
markdown=True
)
return result, history, contents
Binary file modified resources/demo.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

0 comments on commit 5468e4f

Please sign in to comment.