chat.py
import subprocess

import openai
from telegram import Update
from telegram.ext import ContextTypes

# Conversation states shared with the bot's ConversationHandler.
IMAGE, CHAT, SCRIBE = range(3)

# Per-user chat history, keyed by Telegram username.
messages = {}


async def chat_start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """Reset the user's history to the system prompt and enter the CHAT state."""
    messages[update.effective_user.name] = [
        {"role": "system",
         "content": "You are a witty and knowledgeable assistant. You are "
                    "always glad to tell jokes."}]
    await update.message.reply_text("Starting conversation!")
    return CHAT


async def speech_to_text(update: Update, context: ContextTypes.DEFAULT_TYPE) -> str:
    """Download a voice message, convert it to WAV with ffmpeg and transcribe it with Whisper."""
    speech = await context.bot.get_file(update.message.voice.file_id)
    # Newer python-telegram-bot releases (v20+) rename this method to download_to_drive().
    await speech.download('speech.mp3')
    subprocess.call(['ffmpeg', '-i', 'speech.mp3', 'speech.wav', '-y'])
    with open("speech.wav", "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript['text']


async def chat(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """Answer a text or voice message with gpt-3.5-turbo, keeping per-user history."""
    if update.effective_user.name not in messages:
        await chat_start(update, context)
    if update.message.text is not None:
        prompt = update.message.text
    else:
        prompt = await speech_to_text(update, context)
    messages[update.effective_user.name].append(
        {"role": "user",
         "content": f'username {update.effective_user.name} message {prompt}'})
    msg = await update.message.reply_text("Answering...")
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages[update.effective_user.name],
            temperature=0.8,
            max_tokens=600,
            frequency_penalty=0.0,
            presence_penalty=0.2,
        )
        answer = response['choices'][0]['message']['content']
        await msg.edit_text(text=answer)
        messages[update.effective_user.name].append({"role": "assistant", "content": answer})
        print(response["usage"])
        print(len(messages[update.effective_user.name]))
        # Trim the oldest exchanges (keeping the system prompt) once the
        # conversation approaches the model's 4k-token context limit.
        if response["usage"]["total_tokens"] > 3400:
            del messages[update.effective_user.name][1:5]
        return CHAT
    except Exception:
        await msg.edit_text(text="Error, please try again")
        return CHAT


async def transcribe(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """Prompt the user for a voice message and enter the SCRIBE state."""
    await update.message.reply_text('Forward or record a voice message to transcribe')
    return SCRIBE


async def scribe(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """Transcribe the received voice message with Whisper and reply with the text."""
    msg = await update.message.reply_text('Transcribing...')
    try:
        speech = await context.bot.get_file(update.message.voice.file_id)
        await speech.download('speech.mp3')
        subprocess.call(['ffmpeg', '-i', 'speech.mp3', 'speech.wav', '-y'])
        with open("speech.wav", "rb") as audio_file:
            transcript = openai.Audio.transcribe("whisper-1", audio_file)
        await msg.edit_text(transcript['text'])
    except Exception:
        await msg.edit_text('Error, please try again')
    # Re-prompt the user and return to the SCRIBE state.
    return await transcribe(update, context)
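

# Usage sketch: a minimal example of how the handlers above could be wired
# into a python-telegram-bot (v20+) ConversationHandler. The /chat and
# /transcribe commands and the "BOT_TOKEN" placeholder are assumptions for
# illustration; the project's real entry point may differ, and openai.api_key
# is assumed to be configured elsewhere.
if __name__ == "__main__":
    from telegram.ext import (ApplicationBuilder, CommandHandler,
                              ConversationHandler, MessageHandler, filters)

    app = ApplicationBuilder().token("BOT_TOKEN").build()
    conv = ConversationHandler(
        entry_points=[CommandHandler("chat", chat_start),
                      CommandHandler("transcribe", transcribe)],
        states={
            CHAT: [MessageHandler(
                (filters.TEXT & ~filters.COMMAND) | filters.VOICE, chat)],
            SCRIBE: [MessageHandler(filters.VOICE, scribe)],
        },
        fallbacks=[CommandHandler("chat", chat_start)],
    )
    app.add_handler(conv)
    app.run_polling()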