Skip to content

Commit

Permalink
@nekxis/interpret prompt mode (#15)
Browse files Browse the repository at this point in the history
Co-authored-by: Ignacy Łątka <74246391+LatekVo@users.noreply.github.com>
  • Loading branch information
Nekxis and latekvo authored Apr 15, 2024
1 parent 6781d4d commit a73fd8d
Show file tree
Hide file tree
Showing 4 changed files with 88 additions and 26 deletions.
67 changes: 67 additions & 0 deletions core/chainables/web.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
import datetime

from core.tools.scraper import web_query_google_lookup
from core.classes.query import WebQuery
from langchain_core.prompts import ChatPromptTemplate


def web_news_lookup(prompt_text: str):
Expand All @@ -15,3 +18,67 @@ def web_wiki_lookup(prompt_text: str):
def web_docs_lookup(prompt_text: str):
    """Run a documentation-focused Google lookup for the given prompt text."""
    # Build the 'docs'-mode query object and hand it straight to the scraper.
    return web_query_google_lookup(WebQuery('docs', prompt_core=prompt_text))


def web_docs_lookup_prompt():
    """Build the chat prompt that turns documentation search results into an instruction.

    Returns:
        ChatPromptTemplate with a ``system`` message describing the interpreter role
        and a ``user`` message exposing the ``{search_data}`` and ``{user_request}``
        template variables filled in by the lookup chain.
    """
    return ChatPromptTemplate.from_messages([
        (
            "system",
            # Fixed: trailing space after "interpreter." (adjacent string literals
            # concatenate, the original produced "interpreter.Your job ...") and
            # the grammar error "an detailed" -> "a detailed".
            "You are a search results interpreter. "
            "Your job is to write a detailed instruction based on the provided context. "
            "Your job is to convert all the search results you were given into a long, comprehensive and clean output. "
            "Use provided search results data to explain object of user request to the best of your ability. "
            "You don't have a knowledge cutoff. "
            # Anchor the model to the real current month/year since it has no cutoff.
            "It is currently " + datetime.date.today().strftime("%B %Y"),
        ),
        (
            "user",
            "Search results data: "
            "```"
            "{search_data}"
            "```"
            'User request: "Write an article on: {user_request}"',
        ),
    ])


def web_wiki_lookup_prompt():
    """Build the chat prompt used to turn wiki-style search results into an article.

    Returns:
        ChatPromptTemplate with a ``system`` role message and a ``user`` message
        carrying the ``{search_data}`` and ``{user_request}`` template variables.
    """
    # The model has no knowledge cutoff, so tell it the current month/year.
    current_month = datetime.date.today().strftime("%B %Y")
    system_message = (
        "You are a search results interpreter. Your job is to write an article based on the provided context. "
        "Your job is to convert all the search results you were given into a long, comprehensive and clean output. "
        "Use provided search results data to answer the user request to the best of your ability. "
        "You don't have a knowledge cutoff. "
        "It is currently " + current_month
    )
    user_message = (
        "Search results data: "
        "```"
        "{search_data}"
        "```"
        'User request: "Write an article on: {user_request}"'
    )
    return ChatPromptTemplate.from_messages(
        [("system", system_message), ("user", user_message)]
    )


def web_news_lookup_prompt():
    """Build the chat prompt used to turn news search results into an article.

    Returns:
        ChatPromptTemplate with a ``system`` role message and a ``user`` message
        carrying the ``{search_data}`` and ``{user_request}`` template variables.
    """
    # No knowledge cutoff is claimed, so pin the model to today's month/year.
    today_label = datetime.date.today().strftime("%B %Y")
    system_text = (
        "You are a search results interpreter. Your job is to write an article based on the provided context. "
        "Your job is to convert all the search results you were given into a long, comprehensive and clean output. "
        "Use provided search results data to answer the user request to the best of your ability. "
        "You don't have a knowledge cutoff. "
        "It is currently " + today_label
    )
    user_text = (
        "Search results data: "
        "```"
        "{search_data}"
        "```"
        'User request: "Write an article on: {user_request}"'
    )
    messages = [
        ("system", system_text),
        ("user", user_text),
    ]
    return ChatPromptTemplate.from_messages(messages)
1 change: 1 addition & 0 deletions core/classes/query.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ class WebQuery:
prompt_core: str = ''

web_query: str = ''

web_extra_params: Union[dict, None] = None
web_tbs = 0

Expand Down
44 changes: 19 additions & 25 deletions core/lookup.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,19 @@
import datetime

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from core.tools.utils import purify_name
from core.models.configurations import use_configuration
from langchain_core.prompts import ChatPromptTemplate, BaseChatPromptTemplate

# TODO: replace with puppeteer, this one gets blocked occasionally
from googlesearch import search

from core.chainables.web import web_docs_lookup, web_wiki_lookup, web_news_lookup
from core.chainables.web import (
web_docs_lookup,
web_wiki_lookup,
web_news_lookup,
web_docs_lookup_prompt,
web_news_lookup_prompt,
web_wiki_lookup_prompt)
from core.tools.dbops import get_db_by_name
from core.tools.model_loader import load_model

Expand All @@ -29,26 +33,6 @@ def web_chain_function(prompt_dict: dict):
# TODO: news searches should strictly search for news fresher than 1 month / 1 week
# TODO: news crawling should be done through only sites like medium, which are much more dense than google
# TODO: create a different function + prompt for documentation / facts searching, and make this one news focused
web_interpret_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a search results interpreter. Your job is to write an article based on the provided context. "
"Your job is to convert all the search results you were given into a long, comprehensive and clean output. "
"Use provided search results data to answer the user request to the best of your ability. "
"You don't have a knowledge cutoff. "
"It is currently " + datetime.date.today().strftime("%B %Y"),
),
(
"user",
"Search results data: "
"```"
"{search_data}"
"```"
'User request: "Write an article on: {user_request}"',
),
]
)

    def get_user_prompt(_: dict):
        # Ignore the chain's input dict; always read the user's prompt from
        # the enclosing function's `prompt_dict` closure.
        return prompt_dict["input"]
Expand All @@ -63,6 +47,16 @@ def use_selected_mode(user_prompt: str):
else:
return web_docs_lookup(user_prompt)

def interpret_prompt_mode():
if prompt_dict["mode"] == "News":
return web_news_lookup_prompt()
elif prompt_dict["mode"] == "Docs":
return web_docs_lookup_prompt()
elif prompt_dict["mode"] == "Wiki":
return web_wiki_lookup_prompt()
else:
return web_docs_lookup_prompt()
web_interpret_prompt_mode = interpret_prompt_mode()
# NOTE: a detour has been performed here, more details:
# web_chain_function will soon become just a tool playing a part of a larger mechanism.
# prompt creation will be taken over by prompt sentiment extractor which will extract all researchable
Expand All @@ -76,7 +70,7 @@ def use_selected_mode(user_prompt: str):
# this has to be a RunnableLambda, it cannot be a string
"user_request": RunnableLambda(get_user_prompt),
}
| web_interpret_prompt
| web_interpret_prompt_mode
| llm
| output_parser
)
Expand Down
2 changes: 1 addition & 1 deletion core/tools/model_loader.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
import torch
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.llms.ollama import Ollama
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from terminal_gui import USE_HUGGING_FACE
from core.models.configurations import use_configuration

llm_config, embed_config = use_configuration()


Expand Down

0 comments on commit a73fd8d

Please sign in to comment.