// index1-file.js
// 1. Import necessary modules and libraries
import { OpenAI } from 'langchain/llms';
import { RetrievalQAChain } from 'langchain/chains';
import { HNSWLib } from 'langchain/vectorstores';
import { OpenAIEmbeddings } from 'langchain/embeddings';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import * as fs from 'fs';
import * as dotenv from 'dotenv';
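// Note: these bare import paths match early langchain.js releases; newer releases
// expose the same classes under subpaths (e.g. 'langchain/llms/openai') or the
// @langchain/* packages, so the paths may need adjusting for your installed version.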
// 2. Load environment variables
dotenv.config();
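// Note (an assumption about setup, not shown here): the OpenAI model and
// OpenAIEmbeddings classes below read the API key from process.env.OPENAI_API_KEY,
// so the .env file is expected to contain a line like OPENAI_API_KEY=sk-...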
// 3. Set up input data and paths
const txtFilename = "wi320";
const question = "Tell me about WI-320?";
const txtPath = `./${txtFilename}.txt`;
const VECTOR_STORE_PATH = `${txtFilename}.index`;
// 4. Define the main function runWithEmbeddings
export const runWithEmbeddings = async () => {
  // 5. Initialize the OpenAI model with an empty configuration object
  const model = new OpenAI({});
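  // The empty object falls back to the library defaults; generation options can
  // be passed here instead. A minimal sketch (not in the original):
  //   const model = new OpenAI({ temperature: 0 });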
  // 6. Check if a saved vector store already exists on disk
  let vectorStore;
  if (fs.existsSync(VECTOR_STORE_PATH)) {
    // 6.1. If the vector store exists, load it into memory
    console.log('Vector store exists, loading it from disk...');
    vectorStore = await HNSWLib.load(VECTOR_STORE_PATH, new OpenAIEmbeddings());
  } else {
    // 6.2. If the vector store doesn't exist, create it
    // 6.2.1. Read the input text file
    const text = fs.readFileSync(txtPath, 'utf8');
    // 6.2.2. Create a RecursiveCharacterTextSplitter with a specified chunk size
    const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
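    // The splitter also accepts a chunkOverlap option, which preserves context
    // that straddles chunk boundaries. A hedged variant (not in the original):
    //   new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 100 });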
    // 6.2.3. Split the input text into documents
    const docs = await textSplitter.createDocuments([text]);
    // 6.2.4. Create a new vector store from the documents using OpenAIEmbeddings
    vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
    // 6.2.5. Save the vector store to disk
    await vectorStore.save(VECTOR_STORE_PATH);
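    // Saving persists the index to disk (in recent langchain versions, a
    // directory holding the HNSW index plus the docstore), so later runs can
    // skip the embedding step entirely.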
  }
  // 7. Create a RetrievalQAChain from the initialized OpenAI model and the vector store retriever
  const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever());
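  // asRetriever can also take the number of chunks to fetch per query, e.g.
  // vectorStore.asRetriever(4), trading answer grounding against prompt size.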
  // 8. Call the RetrievalQAChain with the input question and store the result in 'res'
  const res = await chain.call({
    query: question,
  });
  // 9. Log the result to the console
  console.log({ res });
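  // For RetrievalQAChain the generated answer is returned under the `text` key,
  // so the answer string itself is available as res.text.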
};
// 10. Execute the main function, surfacing any async errors
runWithEmbeddings().catch(console.error);