# lcel_runnable_example.py
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough, RunnableLambda, RunnableParallel
from langchain_core.messages import AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama import ChatOllama, OllamaEmbeddings
from langchain_community.vectorstores import FAISS
model_name = "llama3.1"
# Set up the vector store and retriever
vectorstore = FAISS.from_texts(
    ["I love reading science fiction and detective novels. My favourite writers in these genres are Agatha Christie and Isaac Asimov."],
    embedding=OllamaEmbeddings(model=model_name),
)
data_retriever = vectorstore.as_retriever()
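# Optional sanity check (a sketch; the query string below is illustrative):
# the retriever implements the standard Runnable interface, so invoking it
# with a query returns a list of Document objects ranked by similarity.
# docs = data_retriever.invoke("favourite writers")
# print(docs[0].page_content)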
# Set up the template with context and question placeholders
template = """
Answer the question using the given context: {context}
Question: {question}
"""
# Build the chat prompt from the template
prompt = ChatPromptTemplate.from_template(template)
def glorify(text):
    # Return an AIMessage, since this function also receives the model's
    # output as an AIMessage
    return AIMessage("\n>>> " + text.content + " <<<\n")
# Set up the chain: fetch context and pass the question through in parallel,
# then format the prompt, call the model, decorate the output, and parse it
# to a string
chain = (
    RunnableParallel({"context": data_retriever, "question": RunnablePassthrough()})
    | prompt
    | ChatOllama(model=model_name)
    | RunnableLambda(glorify)
    | StrOutputParser()
)
# Run the chain on a single question
output = chain.invoke("Who are my favourite writers, and in which genres do they write?")
print(output)
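# The assembled chain also supports the rest of the Runnable interface; a
# minimal sketch of batching several questions at once (the questions here
# are illustrative):
# outputs = chain.batch([
#     "Who are my favourite writers?",
#     "Which genres do I enjoy reading?",
# ])
# for answer in outputs:
#     print(answer)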