Skip to content

Commit

Permalink
Merge branch 'logic-fixes' into llms-refactoring
Browse files Browse the repository at this point in the history
  • Loading branch information
fegloff committed Apr 5, 2024
2 parents a00a6da + cd4f284 commit e1868f8
Show file tree
Hide file tree
Showing 28 changed files with 8,023 additions and 4,384 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM node:16
FROM node:20

WORKDIR /usr/src/app

Expand Down
11,305 changes: 7,081 additions & 4,224 deletions package-lock.json

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@
"@google-cloud/translate": "^8.0.2",
"@grammyjs/auto-chat-action": "^0.1.1",
"@grammyjs/auto-retry": "^1.1.1",
"@grammyjs/conversations": "^1.1.2",
"@grammyjs/files": "^1.0.4",
"@grammyjs/conversations": "^1.2.0",
"@grammyjs/files": "^1.1.0",
"@grammyjs/menu": "^1.2.1",
"@grammyjs/ratelimiter": "^1.2.0",
"@grammyjs/runner": "^2.0.3",
Expand All @@ -96,7 +96,7 @@
"express-async-handler": "^1.2.0",
"form-data": "^4.0.0",
"gpt-tokenizer": "^2.1.1",
"grammy": "^1.18.3",
"grammy": "^1.22.4",
"jsqr": "^1.4.0",
"litllm": "^3.0.0",
"lokijs": "^1.5.12",
Expand Down
48 changes: 37 additions & 11 deletions src/bot.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ import { VoiceMemo } from './modules/voice-memo'
// import { QRCodeBot } from './modules/qrcode/QRCodeBot'
// import { SDImagesBot } from './modules/sd-images'
import { OpenAIBot } from './modules/open-ai'
import { ClaudeBot } from './modules/llms/claudeBot'
import { VertexBot } from './modules/llms/vertexBot'
import { OneCountryBot } from './modules/1country'
import { WalletConnect } from './modules/walletconnect'
import { BotPayments } from './modules/payment'
Expand Down Expand Up @@ -248,6 +250,8 @@ const walletConnect = new WalletConnect()
const payments = new BotPayments()
const schedule = new BotSchedule(bot)
const openAiBot = new OpenAIBot(payments)
const claudeBot = new ClaudeBot(payments)
const vertexBot = new VertexBot(payments)
const oneCountryBot = new OneCountryBot(payments)
const translateBot = new TranslateBot()
const telegramPayments = new TelegramPayments(payments)
Expand Down Expand Up @@ -372,6 +376,8 @@ const PayableBots: Record<string, PayableBotConfig> = {
textToSpeech: { bot: textToSpeechBot },
voiceToVoiceGPTBot: { bot: voiceToVoiceGPTBot },
voiceToText: { bot: voiceToTextBot },
claudeBot: { bot: claudeBot },
vertexBot: { bot: vertexBot },
openAiBot: {
enabled: (ctx: OnMessageContext) => ctx.session.openAi.imageGen.isEnabled,
bot: openAiBot
Expand All @@ -396,7 +402,7 @@ const executeOrRefund = (ctx: OnMessageContext, price: number, bot: PayableBot):
const onMessage = async (ctx: OnMessageContext): Promise<void> => {
try {
// bot doesn't handle forwarded messages
if (!ctx.message.forward_from) {
if (!ctx.message.forward_origin) {
await assignFreeCredits(ctx)

if (telegramPayments.isSupportedEvent(ctx)) {
Expand Down Expand Up @@ -467,6 +473,16 @@ const onCallback = async (ctx: OnCallBackQueryData): Promise<void> => {
// return
// }

if (vertexBot.isSupportedEvent(ctx)) {
await vertexBot.onEvent(ctx)
return
}

if (claudeBot.isSupportedEvent(ctx)) {
await claudeBot.onEvent(ctx)
return
}

if (openAiBot.isSupportedEvent(ctx)) {
await openAiBot.onEvent(ctx, (e) => {
logger.error(e)
Expand Down Expand Up @@ -501,7 +517,7 @@ bot.command(['start', 'help', 'menu'], async (ctx) => {
await ctx.reply(startText, {
parse_mode: 'Markdown',
reply_markup: mainMenu,
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})
Expand All @@ -515,7 +531,7 @@ bot.command('more', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(commandsHelpText.more, {
parse_mode: 'Markdown',
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})
Expand All @@ -524,7 +540,7 @@ bot.command('terms', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(TERMS.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})
Expand All @@ -533,7 +549,7 @@ bot.command('support', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(SUPPORT.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})
Expand All @@ -542,23 +558,23 @@ bot.command('models', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(MODELS.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true
link_preview_options: { is_disabled: true }
})
})

bot.command('lang', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(LANG.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true
link_preview_options: { is_disabled: true }
})
})

bot.command('feedback', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(FEEDBACK.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})
Expand All @@ -567,14 +583,15 @@ bot.command('love', async (ctx) => {
writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler)
return await ctx.reply(LOVE.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})

bot.command('stop', async (ctx) => {
logger.info('/stop command')
await openAiBot.onStop(ctx as OnMessageContext)
await claudeBot.onStop(ctx as OnMessageContext)
ctx.session.translate.enable = false
ctx.session.translate.languages = []
ctx.session.oneCountry.lastDomain = ''
Expand All @@ -584,15 +601,24 @@ bot.command(['alias', 'aliases'], async (ctx) => {
logger.info('/alias command')
return await ctx.reply(ALIAS.text, {
parse_mode: 'Markdown',
disable_web_page_preview: true,
link_preview_options: { is_disabled: true },
message_thread_id: ctx.message?.message_thread_id
})
})

// bot.command(['end'], async (ctx) => {
// logger.info('/end command')
// return await ctx.reply(ALIAS.text, {
// parse_mode: 'Markdown',
// link_preview_options: { is_disabled: true },
// message_thread_id: ctx.message?.message_thread_id
// })
// })

// bot.command("memo", (ctx) => {
// ctx.reply(MEMO.text, {
// parse_mode: "Markdown",
// disable_web_page_preview: true,
// link_preview_options: { is_disabled: true },
// });
// });

Expand Down
2 changes: 1 addition & 1 deletion src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ export default {
? parseInt(process.env.SESSION_TIMEOUT)
: 48, // in hours
llms: {
apiEndpoint: process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
apiEndpoint: process.env.LLMS_ENDPOINT, // // process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
wordLimit: 50,
model: 'chat-bison',
minimumBalance: 0,
Expand Down
2 changes: 1 addition & 1 deletion src/modules/1country/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ export class OneCountryBot implements PayableBot {
// await ctx.reply(`The Domain [${fullUrl}](${config.country.hostname}/new?domain=${lastDomain}) was registered`, {
// parse_mode: 'Markdown',
// message_thread_id: ctx.message?.message_thread_id,
// disable_web_page_preview: false
// link_preview_options: { is_disabled: true }
// })
ctx.transient.analytics.sessionState = RequestState.Success
ctx.transient.analytics.actualResponseTime = now()
Expand Down
21 changes: 15 additions & 6 deletions src/modules/llms/api/athropic.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ const logger = pino({
}
})

const API_ENDPOINT = config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://127.0.0.1:5000' // config.llms.apiEndpoint
const API_ENDPOINT = config.llms.apiEndpoint // 'http://127.0.0.1:5000' // config.llms.apiEndpoint

export const anthropicCompletion = async (
conversation: ChatConversation[],
Expand Down Expand Up @@ -63,6 +63,7 @@ export const anthropicStreamCompletion = async (
msgId: number,
limitTokens = true
): Promise<LlmCompletion> => {
logger.info(`Handling ${model} stream completion`)
const data = {
model,
stream: true, // Set stream to true to receive the completion as a stream
Expand All @@ -87,15 +88,23 @@ export const anthropicStreamCompletion = async (
const msg = chunk.toString()
if (msg) {
if (msg.startsWith('Input Token')) {
inputTokens = msg.split('Input Token: ')[1]
const regex = /Input Token: (\d+)(.*)/
// Execute the regular expression
const match = regex.exec(msg)
if (match) {
inputTokens = match[1].trim() // Extract the integer part
if (match.length >= 3) {
completion += match[2]
}
}
} else if (msg.startsWith('Output Tokens')) {
outputTokens = msg.split('Output Tokens: ')[1]
outputTokens = msg.split('Output Tokens: ')[1].trim()
} else {
wordCount++
completion += msg // .split('Text: ')[1]
completion += msg
if (msg.includes('Output Tokens:')) {
const tokenMsg = msg.split('Output Tokens: ')[1]
outputTokens = tokenMsg.split('Output Tokens: ')[1]
outputTokens = msg.split('Output Tokens: ')[1].trim()
// outputTokens = tokenMsg.split('Output Tokens: ')[1].trim()
completion = completion.split('Output Tokens: ')[0]
}
if (wordCount > wordCountMinimum) { // if (chunck === '.' && wordCount > wordCountMinimum) {
Expand Down
96 changes: 96 additions & 0 deletions src/modules/llms/claudeBot.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import { type BotPayments } from '../payment'
import {
type OnMessageContext,
type OnCallBackQueryData,
type ChatConversation
} from '../types'
import {
hasClaudeOpusPrefix,
isMentioned,
SupportedCommands
} from './helpers'
import { type LlmCompletion } from './api/llmApi'
import { LlmsModelsEnum } from './types'

import { anthropicCompletion, anthropicStreamCompletion } from './api/athropic'
import { LlmsBase } from './llmsBase'
/**
 * Telegram bot module that routes Claude (Anthropic) chat requests to the
 * anthropic completion API. Handles the Opus, Sonnet and Haiku models via
 * explicit commands, short aliases, bot mentions and chat prefixes.
 */
export class ClaudeBot extends LlmsBase {
  constructor (payments: BotPayments) {
    super(payments, 'ClaudeBot')
  }

  /**
   * Up-front price estimate for a request.
   * Always 0 — Claude requests are not pre-charged by this module.
   */
  public getEstimatedPrice (ctx: any): number {
    return 0
  }

  /**
   * Returns true when this module should handle the update: a bot mention,
   * a recognized chat prefix, or one of the Claude commands/aliases.
   * Cheap checks (mention, prefix) run first; the command-list scan only
   * happens when neither matched.
   */
  public isSupportedEvent (
    ctx: OnMessageContext | OnCallBackQueryData
  ): boolean {
    if (isMentioned(ctx)) {
      return true
    }
    if (this.hasPrefix(ctx.message?.text ?? '') !== '') {
      return true
    }
    return ctx.hasCommand([
      SupportedCommands.claudeOpus,
      SupportedCommands.opus,
      SupportedCommands.opusShort,
      SupportedCommands.claudeShort,
      SupportedCommands.claudeSonnet,
      SupportedCommands.sonnet,
      SupportedCommands.sonnetShort,
      SupportedCommands.claudeHaiku,
      SupportedCommands.haikuShort
    ])
  }

  // Returns the matched prefix, or '' when none. Only the Opus prefix is
  // recognized here; other models are reached via commands.
  hasPrefix (prompt: string): string {
    return hasClaudeOpusPrefix(prompt)
  }

  /**
   * Streams a completion for the conversation, updating Telegram message
   * msgId as chunks arrive.
   *
   * NOTE(review): the limitTokens parameter is ignored — `true` is always
   * forwarded because Telegram messages have a character limit. Confirm this
   * hard-coding is intentional before relying on the parameter.
   */
  async chatStreamCompletion (
    conversation: ChatConversation[],
    model: LlmsModelsEnum,
    ctx: OnMessageContext | OnCallBackQueryData,
    msgId: number,
    limitTokens: boolean): Promise<LlmCompletion> {
    return await anthropicStreamCompletion(
      conversation,
      model, // already typed LlmsModelsEnum — redundant cast removed
      ctx,
      msgId,
      true // telegram messages have a character limit
    )
  }

  /** Non-streaming completion of the conversation with the given model. */
  async chatCompletion (
    conversation: ChatConversation[],
    model: LlmsModelsEnum
  ): Promise<LlmCompletion> {
    return await anthropicCompletion(conversation, model)
  }

  /**
   * Entry point: dispatches the update to the Claude model selected by its
   * command or prefix. Unsupported group-chat messages are logged and dropped;
   * private-chat messages fall through to the dispatch checks.
   */
  public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
    ctx.transient.analytics.module = this.module
    if (!this.isSupportedEvent(ctx) && ctx.chat?.type !== 'private') {
      this.logger.warn(`### unsupported command ${ctx.message?.text}`)
      return
    }

    // Opus: commands, short aliases, or the chat prefix.
    if (ctx.hasCommand([SupportedCommands.claudeOpus, SupportedCommands.opus, SupportedCommands.opusShort, SupportedCommands.claudeShort]) || (hasClaudeOpusPrefix(ctx.message?.text ?? '') !== '')) {
      await this.onChat(ctx, LlmsModelsEnum.CLAUDE_OPUS, true)
      return
    }
    if (ctx.hasCommand([SupportedCommands.claudeSonnet, SupportedCommands.sonnet, SupportedCommands.sonnetShort])) {
      await this.onChat(ctx, LlmsModelsEnum.CLAUDE_SONNET, true)
      return
    }
    // Haiku: streaming disabled (third onChat argument).
    if (ctx.hasCommand([SupportedCommands.claudeHaiku, SupportedCommands.haikuShort])) {
      await this.onChat(ctx, LlmsModelsEnum.CLAUDE_HAIKU, false)
    }
  }
}
Loading

0 comments on commit e1868f8

Please sign in to comment.