Commit

Permalink
update default model to gpt-4o + fix edit message error on streaming completion
fegloff committed May 27, 2024
1 parent 95e6de2 commit 47489f3
Showing 4 changed files with 24 additions and 22 deletions.
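The streaming-error fix in the three API handlers below follows one pattern: remember the last text sent to Telegram and skip editMessageText when the new partial completion is identical, since Telegram answers such no-op edits with a 400 GrammyError ("message is not modified"). A minimal standalone sketch of that guard, assuming a grammY Context, an existing bot message id msgId, and a logger; pushPartialCompletion and lastSent are hypothetical names for illustration, not identifiers from the diff:

import { GrammyError, type Context } from 'grammy'

const logger = console // stand-in; the repo wires up its own logger

let lastSent = '' // last text actually pushed to Telegram

async function pushPartialCompletion (ctx: Context, msgId: number, completion: string): Promise<void> {
  // Skip the call entirely when nothing changed; Telegram rejects identical edits with a 400.
  if (!ctx.chat?.id || lastSent === completion) {
    return
  }
  lastSent = completion
  await ctx.api
    .editMessageText(ctx.chat.id, msgId, completion)
    .catch((e: unknown) => {
      if (e instanceof GrammyError && e.error_code === 400) {
        logger.error(e) // benign edit errors (e.g. "message is not modified") are only logged
      } else {
        throw e
      }
    })
}

The sketch mirrors the guard added in athropic.ts, openai.ts, and vertex.ts; in the actual handlers the check lives inline in the streaming loop rather than in a helper.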
4 changes: 3 additions & 1 deletion src/modules/llms/api/athropic.ts
@@ -85,6 +85,7 @@ export const anthropicStreamCompletion = async (
let completion = ''
let outputTokens = ''
let inputTokens = ''
let message = ''
for await (const chunk of completionStream) {
const msg = chunk.toString()
if (msg) {
@@ -115,7 +116,8 @@
completion = completion.replaceAll('...', '')
completion += '...'
wordCount = 0
if (ctx.chat?.id) {
if (ctx.chat?.id && message !== completion) {
message = completion
await ctx.api
.editMessageText(ctx.chat?.id, msgId, completion)
.catch(async (e: any) => {
26 changes: 15 additions & 11 deletions src/modules/llms/api/openai.ts
@@ -131,6 +131,7 @@ export const streamChatCompletion = async (
throw new Error('Context chat id should not be empty after openAI streaming')
}
// let wordCountMinimumCounter = 1;
let message = ''
for await (const part of stream) {
wordCount++
const chunck = part.choices[0]?.delta?.content
@@ -147,19 +148,22 @@
completion = completion.replaceAll('...', '')
completion += '...'
wordCount = 0
await ctx.api
.editMessageText(ctx.chat?.id, msgId, completion)
.catch(async (e: any) => {
if (e instanceof GrammyError) {
if (e.error_code !== 400) {
throw e
if (message !== completion) {
message = completion
await ctx.api
.editMessageText(ctx.chat?.id, msgId, completion)
.catch(async (e: any) => {
if (e instanceof GrammyError) {
if (e.error_code !== 400) {
throw e
} else {
logger.error(e)
}
} else {
logger.error(e)
throw e
}
} else {
throw e
}
})
})
}
}
}
completion = completion.replaceAll('...', '')
6 changes: 4 additions & 2 deletions src/modules/llms/api/vertex.ts
@@ -85,6 +85,7 @@ export const vertexStreamCompletion = async (
let completion = ''
let outputTokens = ''
let inputTokens = ''
let message = ''
for await (const chunk of completionStream) {
const msg = chunk.toString()
if (msg) {
@@ -97,15 +98,16 @@
}
completion = completion.replaceAll('...', '')
completion += '...'
if (ctx.chat?.id) {
if (ctx.chat?.id && message !== completion) {
message = completion
await ctx.api
.editMessageText(ctx.chat?.id, msgId, completion)
.catch(async (e: any) => {
if (e instanceof GrammyError) {
if (e.error_code !== 400) {
throw e
} else {
logger.error(e)
logger.error(e.message)
}
} else {
throw e
10 changes: 2 additions & 8 deletions src/modules/llms/openaiBot.ts
@@ -124,21 +124,15 @@ export class OpenAIBot extends LlmsBase {
SupportedCommands.chat,
SupportedCommands.ask,
SupportedCommands.gpt4,
SupportedCommands.gpt
SupportedCommands.gpt,
SupportedCommands.gpto
]) ||
hasChatPrefix(ctx.message?.text ?? '') ||
isMentioned(ctx) ||
((ctx.message?.text?.startsWith('chat ') ??
ctx.message?.text?.startsWith('ask ')) &&
ctx.chat?.type === 'private')
) {
this.updateSessionModel(ctx, LlmsModelsEnum.GPT_4)
await this.onChat(ctx, LlmsModelsEnum.GPT_4, true, false)
return
}

if (
ctx.hasCommand([SupportedCommands.gpto])) {
this.updateSessionModel(ctx, LlmsModelsEnum.GPT_4O)
await this.onChat(ctx, LlmsModelsEnum.GPT_4O, true, false)
return
