Skip to content

Commit

Permalink
chatgpt-esque responses and fixed cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
gurveervirk committed Aug 22, 2024
1 parent c7341a5 commit aeb9889
Show file tree
Hide file tree
Showing 3 changed files with 103 additions and 60 deletions.
84 changes: 56 additions & 28 deletions app/src/components/ChatArea.js
Original file line number Diff line number Diff line change
Expand Up @@ -44,45 +44,73 @@ function ChatArea({messages, setMessages, chatStarted, setChatStarted}) {

setIsSending(true); // Start sending state
const newMessage = {
id: messages.length + 1,
sender: 'user',
text: inputText.trim(),
id: messages.length + 1,
sender: 'user',
text: inputText.trim(),
};

setMessages([...messages, newMessage]);

try {
const response = await fetch('http://127.0.0.1:5000/api/query', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ query: inputText.trim(), useQueryEngine: useQueryEngine }),
});
const response = await fetch('http://127.0.0.1:5000/api/query', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ query: inputText.trim(), useQueryEngine: useQueryEngine }),
});

if (!response.ok) {
throw new Error('Failed to query the engine');
}
if (!response.ok) {
throw new Error('Failed to query the engine');
}

// Process streaming response
const reader = response.body.getReader();
const decoder = new TextDecoder();
let botMessage = '';

// Create a new bot message and add it to the messages array
const botMessageId = messages.length + 2;
setMessages((prevMessages) => [
...prevMessages,
{
id: botMessageId,
sender: 'bot',
text: '', // Initialize with an empty message
},
]);

while (true) {
const { value, done } = await reader.read();
if (done) break;

// Decode the chunk and append to botMessage
const chunk = decoder.decode(value, { stream: true });
botMessage += chunk;

// Update bot message progressively
setMessages((prevMessages) =>
prevMessages.map((msg) =>
msg.id === botMessageId
? { ...msg, text: botMessage }
: msg
)
);
}

// Once complete, update the state as required
setIsSending(false); // End sending state
setChatStarted(true); // Set chat started state

const data = await response.json();
const botMessage = {
id: messages.length + 2,
sender: 'bot',
text: data.response,
};

setMessages([...messages, newMessage, botMessage]);
setIsSending(false); // End sending state
setChatStarted(true); // Set chat started state
// // Update messages after a delay to simulate real-time conversation
// setTimeout(() => {

// }, 1000); // Simulating delay
} catch (error) {
console.error('Error querying the engine:', error);
console.error('Error querying the engine:', error);
setIsSending(false); // End sending state on error
}

setInputText('');
};


// Prompts related to planning and creating
const prompts = [
"How can I plan a trip?",
Expand Down
2 changes: 1 addition & 1 deletion app/src/index.css
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ body {

.message {
max-width: 95%;
width: fit-content;
width: max-content;
margin-bottom: 10px;
padding: 10px;
border-radius: 10px;
Expand Down
77 changes: 46 additions & 31 deletions tok/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,12 @@
import subprocess
import time


app = Flask(__name__, static_folder='web/build', static_url_path='/')
ollama_process = None
CORS(app)

def start_services():
global ollama_process
try:
# Start Ollama
ollama_process = subprocess.Popen(["ollama", "serve"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Expand All @@ -31,11 +31,7 @@ def start_services():

# Start Neo4j
os.system("neo4j start")
print("Starting Neo4j...")
time.sleep(2)
print("Neo4j started successfully")

return ollama_process

except Exception as e:
print("Error starting services: ", e)
Expand Down Expand Up @@ -113,7 +109,6 @@ def save_to_session(session, data):
json.dump(session_data, session_file)
session_file.truncate()

# Global variable for current session
current_session = None

@app.route('/api/query', methods=['POST'])
Expand All @@ -124,38 +119,57 @@ def query():
data = request.json
query = data.get('query')
use_chat_engine = data.get('useQueryEngine', False)

bot_message = ""
if query is None:
return jsonify({"error": "Query parameter missing"}), 400


# Start the appropriate engine
if use_chat_engine:
if chat_engine is None:
return jsonify({"error": "Query engine not initialized"}), 500

memory.put(ChatMessage.from_str(content=query))
response = chat_engine.chat(query).response
response_generator = chat_engine.stream_chat(query).response_gen
else:
if llm is None:
return jsonify({"error": "LLM not initialized"}), 500

memory.put(ChatMessage.from_str(content=query))
response = llm.chat(memory.get_all()).message.content

data_to_save = {"query": query, "response": response}
is_new_session = False

if cur_session is None:
is_new_session = True
current_session = start_new_session()
cur_session = current_session
prompt = f'`{query}`\n\nGenerate a short and crisp title pertaining to the above query, in quotes'
title_response = llm.complete(prompt).text.strip()
title = {"title": title_response.split('"')[1]}
save_to_session(cur_session, title)

save_to_session(cur_session, data_to_save)
return jsonify({"response": response, "is_new_session": is_new_session})

response_generator = llm.stream_chat(memory.get_all())

def generate_response():
nonlocal bot_message
nonlocal cur_session
global current_session
nonlocal use_chat_engine

try:
for res in response_generator:
if not use_chat_engine:
res = res.delta
yield res
bot_message += res
# Store the complete message
data_to_save = {"query": query, "response": bot_message}

if cur_session is None:
current_session = start_new_session()
cur_session = current_session
# Generate title for the session
prompt = f'`{query}`\n\nGenerate a short and crisp title pertaining to the above query, in quotes'
title_response = llm.complete(prompt).text.strip()
title = {"title": title_response.split('"')[1]}
save_to_session(cur_session, title)

if not use_chat_engine:
memory.put(ChatMessage.from_str(content=bot_message, role='assistant'))

save_to_session(cur_session, data_to_save)

except Exception as e:
print(f"Error streaming response: {e}")
yield "[ERROR] Something went wrong. Please try again later."

return app.response_class(generate_response(), mimetype='text/plain')

except Exception as e:
print(e)
traceback.print_exc()
Expand Down Expand Up @@ -269,19 +283,20 @@ def start_flask_app():
ui.run()

def cleanup():
    """Shut down the background services started by this app.

    Called by FlaskUI's ``on_shutdown`` hook (and safe to call again from the
    script's ``finally`` block): terminates the Ollama server child process,
    kills any stray Ollama processes as a fallback, and stops Neo4j.
    """
    global ollama_process
    if ollama_process:
        ollama_process.terminate()
        try:
            # Reap the child so it does not linger as a zombie; escalate to
            # kill() if it ignores SIGTERM.
            ollama_process.wait(timeout=10)
        except subprocess.TimeoutExpired:
            ollama_process.kill()
        # Clear the handle so a second cleanup() call is a no-op for Ollama.
        ollama_process = None
    # Fallback sweep for any Ollama processes not tracked by our handle.
    # NOTE(review): PowerShell-specific — assumes a Windows host; confirm.
    os.system("powershell -Command \"Get-Process | Where-Object {$_.ProcessName -like '*ollama*'} | Stop-Process\"")
    os.system("neo4j stop")

if __name__ == '__main__':
    # Desktop window hosting the Flask app; cleanup() runs when the window
    # closes. Change width/height as needed.
    ui = FlaskUI(app=app, server="flask", width=1280, height=720, port=5000, on_shutdown=cleanup)
    try:
        create_directory_if_not_exists('prev_msgs')
        # start_services() stores the Ollama handle in the module-level
        # `ollama_process` global rather than returning it.
        start_services()
        initialize_globals()
        start_flask_app()
    finally:
        # Route shutdown through cleanup(): it guards against a None/already
        # terminated `ollama_process` (e.g. when startup failed), whereas a
        # bare `ollama_process.terminate()` here would raise AttributeError.
        cleanup()

0 comments on commit aeb9889

Please sign in to comment.