diff --git a/package-lock.json b/package-lock.json index b0af6a97..094ada88 100644 --- a/package-lock.json +++ b/package-lock.json @@ -37,9 +37,11 @@ "litllm": "^3.0.0", "lokijs": "^1.5.12", "lru-cache": "^10.0.0", + "marked": "^14.1.2", "moment": "^2.29.4", "moment-timezone": "^0.5.43", "node-cron": "^3.0.2", + "node-html-parser": "^6.1.13", "openai": "^4.0.1", "otpauth": "^9.1.3", "pg": "^8.11.2", @@ -10796,6 +10798,14 @@ "node": ">= 0.4" } }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, "node_modules/help-me": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz", @@ -13883,6 +13893,17 @@ "tmpl": "1.0.5" } }, + "node_modules/marked": { + "version": "14.1.2", + "resolved": "https://registry.npmjs.org/marked/-/marked-14.1.2.tgz", + "integrity": "sha512-f3r0yqpz31VXiDB/wj9GaOB0a2PRLQl6vJmXiFrniNwjkKdvakqJRULhjFKJpxOchlCRiG5fcacoUZY5Xa6PEQ==", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 18" + } + }, "node_modules/md5": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", @@ -14397,6 +14418,15 @@ "node-gyp-build-test": "build-test.js" } }, + "node_modules/node-html-parser": { + "version": "6.1.13", + "resolved": "https://registry.npmjs.org/node-html-parser/-/node-html-parser-6.1.13.tgz", + "integrity": "sha512-qIsTMOY4C/dAa5Q5vsobRpOOvPfC4pB61UVW2uSwZNUp0QU/jCekTal1vMmbO0DgdHeLUJpv/ARmDqErVxA3Sg==", + "dependencies": { + "css-select": "^5.1.0", + "he": "1.2.0" + } + }, "node_modules/node-int64": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", diff --git a/package.json b/package.json index 141c9370..d6326d0f 100644 --- a/package.json +++ b/package.json @@ -101,9 +101,11 @@ "litllm": "^3.0.0", "lokijs": "^1.5.12", "lru-cache": "^10.0.0", + "marked": "^14.1.2", "moment": "^2.29.4", "moment-timezone": "^0.5.43", "node-cron": "^3.0.2", + "node-html-parser": "^6.1.13", "openai": "^4.0.1", "otpauth": "^9.1.3", "pg": "^8.11.2", diff --git a/src/bot.ts b/src/bot.ts index 930b7b3e..e15c4a8a 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -21,7 +21,7 @@ import { type OnMessageContext, type PayableBot, type PayableBotConfig, RequestState, type UtilityBot } from './modules/types' -import { mainMenu } from './pages' +import { groupsMainMenu, mainMenu, privateChatMainMenu } from './pages' import { TranslateBot } from './modules/translate/TranslateBot' import { VoiceMemo } from './modules/voice-memo' // import { QRCodeBot } from './modules/qrcode/QRCodeBot' @@ -55,9 +55,10 @@ import { VoiceToTextBot } from './modules/voice-to-text' import { now } from './utils/perf' import { VoiceToVoiceGPTBot } from './modules/voice-to-voice-gpt' // import { VoiceCommand } from './modules/voice-command' -import { createInitialSessionData } from './helpers' +import { createInitialSessionData, addQuotePrefix, markdownToTelegramHtml } from './helpers' import { LlamaAgent } from './modules/subagents' import { llmModelManager } from './modules/llms/utils/llmModelsManager' +import { HmnyBot } from './modules/hmny' Events.EventEmitter.defaultMaxListeners = 30 @@ -184,7 +185,10 @@ bot.use(async (ctx: BotContext, next: NextFunction): Promise<void> => { bot.use( session({ - initial: createInitialSessionData, + initial: () => { + logger.info('Creating new session') + return
createInitialSessionData() + }, storage: enhanceStorage({ storage: new MemorySessionStorage<Enhance<BotSessionData>>(), millisecondsToLive: config.sessionTimeout * 60 * 60 * 1000 // 48 hours @@ -193,6 +197,8 @@ ) bot.use(autoChatAction()) bot.use(mainMenu) +bot.use(privateChatMainMenu) +bot.use(groupsMainMenu) const voiceMemo = new VoiceMemo() // const qrCodeBot = new QRCodeBot() @@ -212,7 +218,7 @@ const voiceTranslateBot = new VoiceTranslateBot(payments) const textToSpeechBot = new TextToSpeechBot(payments) const voiceToTextBot = new VoiceToTextBot(payments) const voiceToVoiceGPTBot = new VoiceToVoiceGPTBot(payments) - +const hmnyBot = new HmnyBot() // const voiceCommand = new VoiceCommand(openAiBot) bot.on('message:new_chat_members:me', async (ctx) => { @@ -324,6 +330,7 @@ const PayableBots: Record<string, PayableBotConfig> = { // voiceCommand: { bot: voiceCommand }, // qrCodeBot: { bot: qrCodeBot }, // sdImagesBot: { bot: sdImagesBot }, + hmny: { bot: hmnyBot }, voiceTranslate: { bot: voiceTranslateBot }, voiceMemo: { bot: voiceMemo }, translateBot: { bot: translateBot }, @@ -356,8 +363,14 @@ const executeOrRefund = async (ctx: OnMessageContext, price: number, bot: Payabl const onMessage = async (ctx: OnMessageContext): Promise<void> => { try { - // bot doesn't handle forwarded messages - if (!ctx.message.forward_origin) { + const { voice, audio } = ctx.update.message + const isVoiceForwardingEnabled = ctx.session.voiceMemo.isVoiceForwardingEnabled || + ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled + // bot doesn't handle forwarded messages unless it is a voice/audio message and isVoiceForwardingEnabled is true + if (!ctx.message.forward_origin || + (isVoiceForwardingEnabled && + ctx.message.forward_origin && + (!!voice || !!audio))) { await assignFreeCredits(ctx) if (llamaAgent.isSupportedEvent(ctx)) { @@ -460,16 +473,24 @@ bot.command(['start', 'help', 'menu'], async (ctx) => { const { totalCreditsAmount } = await chatService.getUserCredits(accountId) const balance = addressBalance.plus(totalCreditsAmount) const balanceOne = payments.toONE(balance, false).toFixed(2) + + const broadcastMessage = ctx.session.lastBroadcast + ? `\n\nLatest from the team\n${await addQuotePrefix(ctx.session.lastBroadcast)}` + : '' + const startText = commandsHelpText.start + .replaceAll('$BROADCAST', broadcastMessage) .replaceAll('$CREDITS', balanceOne + '') .replaceAll('$WALLET_ADDRESS', account.address) - await ctx.reply(startText, { - parse_mode: 'Markdown', - reply_markup: mainMenu, + const htmlStartText = await markdownToTelegramHtml(startText) + + await ctx.reply(htmlStartText, { + parse_mode: 'HTML', + reply_markup: ctx.chat.type === 'private' ? privateChatMainMenu : groupsMainMenu, link_preview_options: { is_disabled: true }, message_thread_id: ctx.message?.message_thread_id - }) + }).catch(e => { console.log(e) }) }) const logErrorHandler = (ex: any): void => { @@ -506,7 +527,6 @@ bot.command('support', async (ctx) => { bot.command('models', async (ctx) => { const models = llmModelManager.generateTelegramOutput() - console.log(models) writeCommandLog(ctx as OnMessageContext).catch(logErrorHandler) return await ctx.reply(models, { parse_mode: 'Markdown',
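The reworked gate in onMessage (src/bot.ts above) is easiest to read as a predicate: handle anything that was not forwarded, plus forwarded voice/audio while forwarding is armed. A minimal sketch of that logic (illustrative only, not code from this patch; Message is grammY's published message type):

```ts
import { type Message } from 'grammy/types'

// Handle a message unless it was forwarded — with one exception:
// a forwarded voice/audio message is allowed through while voice
// forwarding (persistent or one-time) is enabled.
function shouldHandle (msg: Message, voiceForwardingEnabled: boolean): boolean {
  const isVoiceOrAudio = Boolean(msg.voice ?? msg.audio)
  return msg.forward_origin === undefined || (voiceForwardingEnabled && isVoiceOrAudio)
}
```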
diff --git a/src/config.ts b/src/config.ts index a8baeb22..09d61672 100644 --- a/src/config.ts +++ b/src/config.ts @@ -62,14 +62,15 @@ export default { } }, chatGpt: { - chatCompletionContext: - 'You are an AI Bot powered by Harmony. Your strengths are ai api aggregation for chat, image, and voice interactions. Leveraging a suite of sophisticated subagents, you have the capability to perform tasks such as internet browsing and accessing various services. Your responses should be adaptable to the conversation while maintaining brevity, ideally not exceeding 100 words.', + chatCompletionContext: 'Reply concisely, ideally not exceeding 100 words', + // 'You are an AI Bot powered by Harmony. Your strengths are ai api aggregation for chat, image, and voice interactions. Leveraging a suite of sophisticated subagents, you have the capability to perform tasks such as internet browsing and accessing various services. Your responses should be adaptable to the conversation while maintaining brevity, ideally not exceeding 100 words.', // 'You are an AI Bot powered by Harmony. Your strengths are ai api aggregation for chat, image, and voice interactions, and more. You have subagents that help you with tasks like browsing the internet, and other services. Respond flexibly, but try to stay within 100 words in all of your responses.', webCrawlerContext: 'You will receive a web crawling text. Please get key concepts, but try to stay within 4000 words in your response.', - visionCompletionContext: `You are a concise AI Bot powered by Harmony, capable of providing complete responses within a 100-word limit. - For each additional image, extend your response by 30 words. Your responses should be informative and comprehensive, - wrapping up all details without leaving them hanging. Use your flexibility to adapt to any topic, and deliver engaging and fulfilling - conversations in a succinct manner.`, + visionCompletionContext: 'Respond within a 100-word limit', + // `You are a concise AI Bot powered by Harmony, capable of providing complete responses within a 100-word limit. + // For each additional image, extend your response by 30 words. Your responses should be informative and comprehensive, + // wrapping up all details without leaving them hanging. Use your flexibility to adapt to any topic, and deliver engaging and fulfilling + // conversations in a succinct manner.`, maxTokens: parseInt(process.env.OPENAI_MAX_TOKENS ?? '800'), // telegram messages have a char limit wordLimit: 30, wordCountBetween: 10, @@ -105,6 +106,7 @@ }, voiceMemo: { isEnabled: Boolean(parseInt(process.env.VOICE_MEMO_ENABLED ?? '1')), + isVoiceForwardingEnabled: Boolean(parseInt(process.env.VOICE_MEMO_FORWARDING_ENABLED ?? '0')), telegramApiId: parseInt(process.env.TELEGRAM_API_ID ?? ''), telegramApiHash: process.env.TELEGRAM_API_HASH ?? '', speechmaticsApiKey: process.env.SPEECHMATICS_API_KEY ?? '', diff --git a/src/constants.ts b/src/constants.ts index 77285dfc..712b08f4 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -1,5 +1,6 @@ export enum MenuIds { MAIN_MENU = 'main-menu', + PRIVATE_MAIN_MENU = 'private-main-menu', IMAGE_MENU = 'image-menu-main', QR_BOT_MAIN = 'qrbot-menu-main', QR_BOT_CHANGE_OPTIONS = 'qrbot-menu-change-options', @@ -16,6 +17,20 @@ CHAT_GPT_MODEL = 'chat-gpt-model', } +export const MENU_URL_BUTTONS = [ + { + text: 'πŸ›  Build on Harmony', + url: 'https://docs.harmony.one/home' + }, + { + text: '🏠 Harmony', + url: 'https://harmony.one' + } +] + +export const docsMenuLabel = 'A fast and open platform for decentralized applications of AI ∩ Crypto. To scale trust, create a radically fair economy, and push humanity into becoming deus.'
+ +const DOUBLE_NEW_LINE = '\n\n' // const balance = await payments.getAddressBalance(userWalletAddress); // const balanceOne = payments.toONE(balance, false).toFixed(2); // const startText = commandsHelpText.start @@ -23,23 +38,16 @@ export enum MenuIds { // .replace("$WALLET_ADDRESS", userWalletAddress); // Your credits: $CREDITS ONE tokens. Send to $WALLET_ADDRESS for recharge. +let startText = "Hello, I'm ONE Bot on Telegram from Harmony – for ALL your AI wishes πŸ§šβ€β™€οΈ.$BROADCAST" +startText += `${DOUBLE_NEW_LINE}/ask how to add harmony to metamask` +startText += `${DOUBLE_NEW_LINE}/image glimpses of a herd of wild elephants crossing a savanna` +startText += `${DOUBLE_NEW_LINE}/more Summarize voice messages, artistic QR code, ChatGPT 32K, DALL-E, Wallet Connect, send tokens, sign transactions...` +startText += `${DOUBLE_NEW_LINE}/help Show this message. Join user group @onebotlove or read docs at harmony.one/bot.` +startText += `${DOUBLE_NEW_LINE}Your credits in 1Bot Credits: $CREDITS` +startText += `${DOUBLE_NEW_LINE}Send ONE to: \`$WALLET_ADDRESS\`` export const commandsHelpText = { - start: `Hello, I'm ONE Bot on Telegram from Harmony – for ALL your AI wishes πŸ§šβ€β™€οΈ. - -/ask act like elon musk, expand our [q4 roadmap](https://xn--qv9h.s.country/p/generating-roadmap-as-ceo-vs-cto) "telegram ai bot" -/ask act like mark zuckerberg instead - -/image glimpses of a herd of wild elephants crossing a savanna - -/more Summarize voice messages, artistic QR code, ChatGPT 32K, DALL-E, Wallet Connect, send tokens, sign transactions... - -/help Show this message. Join user group @onebotlove or read docs at harmony.one/bot. - -Your credits in 1Bot Credits: $CREDITS - -Send ONE to: \`$WALLET_ADDRESS\` -`, + start: startText, // more: `/ explain like i am 5, what is a superconductor? // . explain like i have a phd, what is category theory? @@ -278,6 +286,11 @@ export const PROMPTS = { '(KHFB, AuroraNegative),(Worst Quality, Low Quality:1.4), ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face, blurry, draft, grainy' } +export const VOICE_MEMO_FORWARDING = { + enabled: 'Voice note forwarding is now active. The next voice note you send will be forwarded automatically. This setting will deactivate after forwarding one voice note.', + restricted: 'Sorry, voice note forwarding can only be enabled by admin users. If you need this feature, please contact an admin for assistance.' 
+} + export const ALIAS = { text: ` Shortcut Commands ⏳ diff --git a/src/database/stats.service.ts b/src/database/stats.service.ts index 6203adbe..9c25136c 100644 --- a/src/database/stats.service.ts +++ b/src/database/stats.service.ts @@ -239,4 +239,11 @@ export class StatsService { return await queryBuilder.execute() } + + public async getAllChatId (): Promise<number[]> { + const queryBuilder = logRepository.createQueryBuilder('logs') + .select('distinct("groupId")') + + return await queryBuilder.execute() + } } diff --git a/src/helpers.ts b/src/helpers.ts index c446376b..246c124b 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -2,7 +2,8 @@ import config from './config' import { LlmModelsEnum } from './modules/llms/utils/llmModelsManager' import { type DalleImageSize } from './modules/llms/utils/types' import { type BotSessionData } from './modules/types' - +import { marked } from 'marked' +import { parse as parseHtml, HTMLElement } from 'node-html-parser' export function createInitialSessionData (): BotSessionData { return { oneCountry: { lastDomain: '' }, @@ -47,6 +48,82 @@ isInscriptionLotteryEnabled: config.openAi.dalle.isInscriptionLotteryEnabled, imgInquiried: [] }, - currentModel: LlmModelsEnum.GPT_4O + voiceMemo: { + isOneTimeForwardingVoiceEnabled: false, + isVoiceForwardingEnabled: config.voiceMemo.isVoiceForwardingEnabled + }, + currentModel: LlmModelsEnum.GPT_4O, + lastBroadcast: '' + } +} + +type AllowedAttributesType = Record<string, string[]> + +function sanitizeHtml (html: string): string { + const allowedTags = [ + 'b', 'strong', 'i', 'em', 'u', 'ins', 's', 'strike', 'del', + 'span', 'tg-spoiler', 'a', 'code', 'pre', 'tg-emoji', 'blockquote' + ] + const allowedAttributes: AllowedAttributesType = { + a: ['href'], + span: ['class'], + 'tg-emoji': ['emoji-id'], + pre: ['class'], + code: ['class'], + blockquote: ['expandable'] + } + const root = parseHtml(html) + + function walk (node: HTMLElement): void { + if (node.nodeType === 1 && node.tagName) { // ELEMENT_NODE with a tagName + const tagName = node.tagName.toLowerCase() + if (!allowedTags.includes(tagName)) { + const children = node.childNodes + node.replaceWith(...children) + children.forEach(child => { + if (child instanceof HTMLElement) { + walk(child) + } + }) + return + } else { + // Remove disallowed attributes + const allowedAttrs = allowedAttributes[tagName] || [] + const attributes = node.attributes + Object.keys(attributes).forEach(attrName => { + if (!allowedAttrs.includes(attrName)) { + node.removeAttribute(attrName) + } + }) + // Special case for span with tg-spoiler class + if (tagName === 'span' && node.getAttribute('class') !== 'tg-spoiler') { + node.removeAttribute('class') + } + } + } + node.childNodes.forEach(child => { + if (child instanceof HTMLElement) { + walk(child) + } + }) } + + walk(root) + return root.toString() +} + +export async function markdownToTelegramHtml (text: string): Promise<string> { + try { + const html = await marked(text) + return sanitizeHtml(html) + } catch (error) { + console.error('Error parsing markdown:', error) + return text // Return original text if parsing fails + } +} + +export async function addQuotePrefix (text: string): Promise<string> { + if (!text) return '' + const htmlText = await markdownToTelegramHtml(text) + return `<blockquote>${htmlText}</blockquote>` +}
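Concretely, the helpers above should behave roughly like this (expected output reasoned from the code, not captured from a run; marked wraps plain text in a <p> element, which is not in the allowed list, so sanitizeHtml unwraps it and keeps the children):

```ts
// Markdown in, Telegram-safe HTML out (approximate results in comments).
const html = await markdownToTelegramHtml('**bold** and [docs](https://harmony.one/bot)')
// β‰ˆ '<strong>bold</strong> and <a href="https://harmony.one/bot">docs</a>\n'

const quoted = await addQuotePrefix('Latest team update')
// β‰ˆ '<blockquote>Latest team update\n</blockquote>'
```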
diff --git a/src/modules/hmny/index.ts b/src/modules/hmny/index.ts new file mode 100644 index 00000000..daad80f3 --- /dev/null +++ b/src/modules/hmny/index.ts @@ -0,0 +1,263 @@ +import { GrammyError } from 'grammy' +import * as Sentry from '@sentry/node' +import { type Logger, pino } from 'pino' +import { + type OnMessageContext, + type OnCallBackQueryData, + RequestState, + type PayableBot +} from '../types' +import { isAdmin } from '../llms/utils/context' +import { sendMessage, MAX_TRIES } from '../llms/utils/helpers' +import { sleep } from '../sd-images/utils' +import { now } from '../../utils/perf' +import { statsService } from '../../database/services' +import { type BroadcastError } from './types' +// import { docsMenu, docsMenuLabel } from './helpers' + +export enum SupportedCommands { + broadcast = 'broadcast', + preview = 'preview' + // docs = 'docs' +} + +export class HmnyBot implements PayableBot { + public readonly module = 'HmnyAdminBot' + private readonly logger: Logger + + constructor () { + this.logger = pino({ + name: this.module, + transport: { + target: 'pino-pretty', + options: { colorize: true } + } + }) + } + + public isSupportedEvent ( + ctx: OnMessageContext | OnCallBackQueryData + ): boolean { + const hasCommand = ctx.hasCommand( + Object.values(SupportedCommands).map((command) => command) + ) + const hasPrefix = this.hasPrefix(ctx.message?.text ?? '') + if (hasPrefix) { + return true + } + return hasCommand + } + + private hasPrefix (prompt: string): boolean { + return false + } + + public getEstimatedPrice (ctx: any): number { + return 0 + } + + public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> { + ctx.transient.analytics.module = this.module + if (!this.isSupportedEvent(ctx)) { + this.logger.warn(`### unsupported command ${ctx.message?.text}`) + return + } + + // if (ctx.hasCommand(SupportedCommands.docs)) { + // await this.onDocsMenu(ctx) + // return + // } + + if (ctx.hasCommand(SupportedCommands.preview)) { + await this.onBroadcast(ctx, true) + return + } + + if (ctx.hasCommand(SupportedCommands.broadcast)) { + await this.onBroadcast(ctx) + return + } + + this.logger.warn('### unsupported command') + await ctx.reply('### unsupported command', { message_thread_id: ctx.message?.message_thread_id }) + ctx.transient.analytics.actualResponseTime = now() + ctx.transient.analytics.sessionState = RequestState.Error + } + + // onDocsMenu = async (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> => { + // const keyboard = new InlineKeyboard() + // let menu = '' + // const isPrivate = ctx.chat?.type === 'private' + // const linksPreview = docsMenu.length > 1 + // if (isPrivate) { + // docsMenu.forEach(item => { + // keyboard.webApp(item.label, item.url).row() + // }) + // } else { + // menu = '\n' + // docsMenu.forEach(item => { + // menu += `[${item.label}](${item.url})\n` + // }) + // } + // await ctx.reply(`${docsMenuLabel}\n${menu}`, { + // reply_markup: isPrivate ? keyboard : undefined, + // parse_mode: 'Markdown', + // link_preview_options: { is_disabled: linksPreview }, + // message_thread_id: ctx.message?.message_thread_id + // }) + // ctx.transient.analytics.actualResponseTime = now() + // ctx.transient.analytics.sessionState = RequestState.Success + // }
+ onBroadcast = async (ctx: OnMessageContext | OnCallBackQueryData, isPreview = false): Promise<void> => { + const chatErrors: BroadcastError[] = [] + let errorMessage = '' + if (await isAdmin(ctx, false, true) && ctx.chat?.type === 'private') { + if (!ctx.match) { + await ctx.reply('Missing broadcast message', { message_thread_id: ctx.message?.message_thread_id }) + ctx.transient.analytics.sessionState = RequestState.Error + ctx.transient.analytics.actualResponseTime = now() + return + } + const urls = ctx.entities('url') + const linksPreview = urls.length > 1 + if (isPreview) { + await ctx.reply( + ctx.match as string, + { + parse_mode: 'Markdown', + link_preview_options: { is_disabled: linksPreview } + }) + return + } + const chatsArray = await statsService.getAllChatId() + // const chatsArray = [ + // 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + // 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + // 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + // 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + // 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + // 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + // 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + // 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + // 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + // 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + // 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + // 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + // 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + // 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + // 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + // 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, + // 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, + // 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, + // 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, + // 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400 + // ] + let counter = 0 + const batchSize = 29 + const delayMs = 2000 + for (let i = 0; i < chatsArray.length; i += batchSize) { + const batch = chatsArray.slice(i, i + batchSize) + + await Promise.all(batch.map(async chat => { + if (chat !== ctx.chat?.id) { + try { + await ctx.api.sendMessage( + chat, + ctx.match as string, + { + parse_mode: 'Markdown', + link_preview_options: { is_disabled: linksPreview } + }) + counter++ + } catch (e) { + if (e instanceof GrammyError) { + chatErrors.push({ + chatId: chat, + errorMessage: e.message + }) + } else { + chatErrors.push({ + chatId: chat, + errorMessage: '' + }) + } + } + } + })) + if (i + batchSize < chatsArray.length) { + this.logger.info(`Sleeping for ${delayMs}ms after sending ${batchSize} messages`) + await sleep(delayMs) + } + } + ctx.session.lastBroadcast = ctx.match as string + if (chatErrors.length > 0) { + errorMessage += '\n*Errors:*\n' + chatErrors.forEach(error => { + errorMessage += `${error.chatId}: ${error.errorMessage}\n` + }) + } + await ctx.reply(`Broadcast sent successfully to ${counter} chats. ${errorMessage}`, + { parse_mode: 'Markdown' }) + } else { + await ctx.reply('This command is reserved', { message_thread_id: ctx.message?.message_thread_id }) + ctx.transient.analytics.sessionState = RequestState.Error + ctx.transient.analytics.actualResponseTime = now() + } + } + + async onError ( + ctx: OnMessageContext | OnCallBackQueryData, + ex: any, + retryCount: number = MAX_TRIES, + msg?: string + ): Promise<void> { + ctx.transient.analytics.sessionState = RequestState.Error + Sentry.setContext('open-ai', { retryCount, msg }) + Sentry.captureException(ex) + if (retryCount === 0) { + // Retry limit reached, log an error or take alternative action + this.logger.error(`Retry limit reached for error: ${ex}`) + return + } + if (ex instanceof GrammyError) { + if (ex.error_code === 400 && ex.description.includes('not enough rights')) { + await sendMessage( + ctx, + 'Error: The bot does not have permission to send photos in chat' + ) + ctx.transient.analytics.actualResponseTime = now() + } else if (ex.error_code === 429) { + const retryAfter = ex.parameters.retry_after + ? ex.parameters.retry_after < 60 + ? 60 + : ex.parameters.retry_after * 2 + : 60 + const method = ex.method + const errorMessage = `On method "${method}" | ${ex.error_code} - ${ex.description}` + this.logger.error(errorMessage) + await sendMessage( + ctx, + `${ + ctx.from.username ? ctx.from.username : '' + } Bot has reached limit, wait ${retryAfter} seconds` + ).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) }) + ctx.transient.analytics.actualResponseTime = now() + if (method === 'editMessageText') { + ctx.session.chatGpt.chatConversation.pop() // deletes last prompt + } + await sleep(retryAfter * 1000) // wait retryAfter seconds to enable bot + } else { + this.logger.error( + `On method "${ex.method}" | ${ex.error_code} - ${ex.description}` + ) + } + } else { + this.logger.error(`${ex.toString()}`) + await sendMessage(ctx, 'Error handling your request') + .catch(async (e) => { await this.onError(ctx, e, retryCount - 1) } + ) + ctx.transient.analytics.actualResponseTime = now() + } + } +}
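onBroadcast sends 29 messages per batch and then sleeps 2 s, which keeps the broadcast under Telegram's bulk-send guidance of roughly 30 messages per second. The same pattern as a standalone generic helper (a sketch, not part of this PR):

```ts
// Run an async handler over items in fixed-size batches, pausing between
// batches so a downstream rate limit is never exceeded.
async function processInBatches<T> (
  items: T[],
  batchSize: number,
  delayMs: number,
  handler: (item: T) => Promise<void>
): Promise<void> {
  for (let i = 0; i < items.length; i += batchSize) {
    await Promise.all(items.slice(i, i + batchSize).map(handler))
    if (i + batchSize < items.length) {
      await new Promise(resolve => setTimeout(resolve, delayMs))
    }
  }
}
```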
diff --git a/src/modules/hmny/types.ts b/src/modules/hmny/types.ts new file mode 100644 index 00000000..33329a12 --- /dev/null +++ b/src/modules/hmny/types.ts @@ -0,0 +1,4 @@ +export interface BroadcastError { + chatId: number + errorMessage: string +} diff --git a/src/modules/llms/api/openai.ts b/src/modules/llms/api/openai.ts index 3bb1cbe9..085271b2 100644 --- a/src/modules/llms/api/openai.ts +++ b/src/modules/llms/api/openai.ts @@ -130,7 +130,7 @@ export const streamChatCompletion = async ( model, messages: messages as ChatCompletionMessageParam[], // OpenAI.Chat.Completions.CreateChatCompletionRequestMessage[], stream: true, - max_tokens: limitTokens ? config.openAi.chatGpt.maxTokens : undefined, + max_completion_tokens: limitTokens ?
config.openAi.chatGpt.maxTokens : undefined, // max_tokens: temperature: config.openAi.dalle.completions.temperature || 0.8 }) let wordCount = 0 diff --git a/src/modules/llms/claudeBot.ts b/src/modules/llms/claudeBot.ts index ab432978..bfae09ba 100644 --- a/src/modules/llms/claudeBot.ts +++ b/src/modules/llms/claudeBot.ts @@ -4,18 +4,17 @@ import { type OnCallBackQueryData, type ChatConversation } from '../types' -import { hasCommandPrefix, SupportedCommands } from './utils/helpers' +import { SupportedCommands } from './utils/helpers' import { type LlmCompletion } from './api/llmApi' import { anthropicCompletion, anthropicStreamCompletion, toolsChatCompletion } from './api/athropic' import { LlmsBase } from './llmsBase' import { type ModelVersion } from './utils/llmModelsManager' export class ClaudeBot extends LlmsBase { - private readonly opusPrefix: string[] + private readonly claudeModels: ModelVersion[] constructor (payments: BotPayments) { super(payments, 'ClaudeBot', 'llms') - this.opusPrefix = this.modelManager.getPrefixByModel(this.modelsEnum.CLAUDE_3_OPUS) ?? [] } public getEstimatedPrice (ctx: any): number { @@ -25,18 +24,7 @@ export class ClaudeBot extends LlmsBase { public isSupportedEvent ( ctx: OnMessageContext | OnCallBackQueryData ): boolean { - const hasCommand = ctx.hasCommand([ - this.commandsEnum.CLAUDE, - this.commandsEnum.OPUS, - this.commandsEnum.O, - this.commandsEnum.C, - this.commandsEnum.CTOOL, - this.commandsEnum.STOOL, - this.commandsEnum.CLAUDES, - this.commandsEnum.SONNET, - this.commandsEnum.S, - this.commandsEnum.HAIKU, - this.commandsEnum.H]) + const hasCommand = ctx.hasCommand(this.supportedCommands) if (ctx.hasCommand(SupportedCommands.new) && this.checkModel(ctx)) { return true @@ -48,12 +36,6 @@ export class ClaudeBot extends LlmsBase { return hasCommand } - hasPrefix (prompt: string): string { - return ( - hasCommandPrefix(prompt, this.opusPrefix) - ) - } - async chatStreamCompletion ( conversation: ChatConversation[], model: ModelVersion, @@ -87,16 +69,7 @@ export class ClaudeBot extends LlmsBase { this.logger.warn(`### unsupported command ${ctx.message?.text}`) return } - if (ctx.hasCommand([this.commandsEnum.CTOOL])) { - this.updateSessionModel(ctx, this.modelsEnum.CLAUDE_3_OPUS) - await this.onChat(ctx, this.modelsEnum.CLAUDE_3_OPUS, false, true) - return - } - if (ctx.hasCommand([this.commandsEnum.STOOL])) { - this.updateSessionModel(ctx, this.modelsEnum.CLAUDE_35_SONNET) - await this.onChat(ctx, this.modelsEnum.CLAUDE_35_SONNET, false, true) - return - } + if ( (ctx.hasCommand(SupportedCommands.new) && this.checkModel(ctx)) ) { @@ -104,25 +77,15 @@ export class ClaudeBot extends LlmsBase { await this.onChat(ctx, this.modelsEnum.CLAUDE_3_OPUS, true, false) return } - if (ctx.hasCommand([ - this.commandsEnum.CLAUDE, - this.commandsEnum.OPUS, - this.commandsEnum.O, - this.commandsEnum.C]) || - (hasCommandPrefix(ctx.message?.text ?? 
'', this.opusPrefix) !== '') - ) { - this.updateSessionModel(ctx, this.modelsEnum.CLAUDE_3_OPUS) - await this.onChat(ctx, this.modelsEnum.CLAUDE_3_OPUS, true, false) - return - } - if (ctx.hasCommand([this.commandsEnum.CLAUDES, this.commandsEnum.SONNET, this.commandsEnum.S])) { - this.updateSessionModel(ctx, this.modelsEnum.CLAUDE_35_SONNET) - await this.onChat(ctx, this.modelsEnum.CLAUDE_35_SONNET, true, false) + + const model = this.getModelFromContext(ctx) + if (!model) { + this.logger.warn(`### unsupported model for command ${ctx.message?.text}`) return } - if (ctx.hasCommand([this.commandsEnum.HAIKU, this.commandsEnum.H])) { - this.updateSessionModel(ctx, this.modelsEnum.CLAUDE_3_HAIKU) - await this.onChat(ctx, this.modelsEnum.CLAUDE_3_HAIKU, false, false) - } + this.updateSessionModel(ctx, model.version) + + const usesTools = ctx.hasCommand([this.commandsEnum.CTOOL, this.commandsEnum.STOOL]) + await this.onChat(ctx, model.version, usesTools ? false : this.getStreamOption(model.version), usesTools) } } diff --git a/src/modules/llms/dalleBot.ts b/src/modules/llms/dalleBot.ts index 0348f3ce..03c10483 100644 --- a/src/modules/llms/dalleBot.ts +++ b/src/modules/llms/dalleBot.ts @@ -160,6 +160,7 @@ export class DalleBot extends LlmsBase { ctx.transient.analytics.module = this.module const session = this.getSession(ctx) const isSupportedEvent = this.isSupportedEvent(ctx) + if (!isSupportedEvent && ctx.chat?.type !== 'private') { this.logger.warn(`### unsupported command ${ctx.message?.text}`) return diff --git a/src/modules/llms/llmsBase.ts b/src/modules/llms/llmsBase.ts index a3b81e19..ccadfaba 100644 --- a/src/modules/llms/llmsBase.ts +++ b/src/modules/llms/llmsBase.ts @@ -27,7 +27,7 @@ import { import { type LlmCompletion, deleteCollection } from './api/llmApi' import * as Sentry from '@sentry/node' import { now } from '../../utils/perf' -import { type LLMModel } from './utils/types' +import { type ChatModel, type LLMModel } from './utils/types' import { ErrorHandler } from '../errorhandler' import { SubagentBase } from '../subagents/subagentBase' import { @@ -48,7 +48,11 @@ export abstract class LlmsBase implements PayableBot { protected commandsEnum = LlmCommandsEnum protected subagents: SubagentBase[] protected botSuspended: boolean - protected supportedModels: LLMModel[] // LlmsModelsEnum[] + protected supportedModels: LLMModel[] + protected supportedCommands: string[] + protected supportedPrefixes: string[] + protected botName: string + errorHandler: ErrorHandler constructor (payments: BotPayments, @@ -57,6 +61,7 @@ export abstract class LlmsBase implements PayableBot { subagents?: SubagentBase[] ) { this.module = module + this.botName = module this.logger = pino({ name: this.module, transport: { @@ -70,9 +75,29 @@ export abstract class LlmsBase implements PayableBot { this.payments = payments this.subagents = subagents ?? 
[] this.errorHandler = new ErrorHandler() + this.supportedModels = this.initSupportedModels() + this.supportedCommands = this.initSupportedCommands() + this.supportedPrefixes = this.initSupportedPrefixes() + } + + private initSupportedModels (): LLMModel[] { + return this.modelManager.getModelsByBot(this.botName) + } + + private initSupportedCommands (): string[] { + return this.supportedModels + .filter(model => model.botName === this.botName) + .flatMap(model => model.commands) + } + + private initSupportedPrefixes (): string[] { + return this.supportedModels + .filter(model => model.botName === this.botName) + .flatMap(model => this.modelManager.getPrefixByModel(model.version) ?? []) } public abstract onEvent (ctx: OnMessageContext | OnCallBackQueryData, refundCallback: (reason?: string) => void): Promise<void> + public abstract isSupportedEvent ( ctx: OnMessageContext | OnCallBackQueryData ): boolean @@ -92,7 +117,29 @@ usesTools: boolean ): Promise<LlmCompletion> - protected abstract hasPrefix (prompt: string): string + // protected abstract hasPrefix (prompt: string): string + protected hasPrefix (prompt: string): string { + return this.supportedPrefixes.find(prefix => prompt.toLocaleLowerCase().startsWith(prefix)) ?? '' + } + + protected getStreamOption (model: ModelVersion): boolean { + const foundModel = this.supportedModels.find(m => m.version === model) as ChatModel | undefined + return foundModel?.stream ?? false + } + + protected getModelFromContext (ctx: OnMessageContext | OnCallBackQueryData): LLMModel | undefined { + for (const model of this.supportedModels) { + if (model.botName !== this.botName) continue + if (ctx.hasCommand(model.commands)) { + return model + } + const prefix = this.modelManager.getPrefixByModel(model.version) + if (prefix && prefix.some(p => (ctx.message?.text ?? '').startsWith(p))) { + return model + } + } + return undefined + } addSubagents (subagents: SubagentBase[]): void { this.subagents = subagents @@ -110,7 +157,7 @@ return !!this.supportedModels.find(model => model.version === ctx.session.currentModel) } - protected async runSubagents (ctx: OnMessageContext | OnCallBackQueryData, msg: ChatConversation): Promise<void> { + protected async runSubagents (ctx: OnMessageContext | OnCallBackQueryData, msg: ChatConversation, stream: boolean, usesTools: boolean): Promise<void> { const session = this.getSession(ctx) await Promise.all(this.subagents.map(async (agent: SubagentBase) => await agent.run(ctx, msg))) @@ -119,7 +166,7 @@ session.requestQueue.push(msg) if (!session.isProcessingQueue) { session.isProcessingQueue = true - await this.onChatRequestHandler(ctx, true, false).then(() => { + await this.onChatRequestHandler(ctx, stream, usesTools).then(() => { session.isProcessingQueue = false }) } @@ -167,7 +214,7 @@ content: prompt as string ??
'', // await preparePrompt(ctx, prompt as string), numSubAgents: supportedAgents } - await this.runSubagents(ctx, msg) // prompt as string) + await this.runSubagents(ctx, msg, stream, usesTools) // prompt as string) } ctx.transient.analytics.actualResponseTime = now() } catch (e: any) { diff --git a/src/modules/llms/openaiBot.ts b/src/modules/llms/openaiBot.ts index d5a860ad..defe68b9 100644 --- a/src/modules/llms/openaiBot.ts +++ b/src/modules/llms/openaiBot.ts @@ -6,7 +6,6 @@ import { RequestState } from '../types' import { - hasCommandPrefix, hasNewPrefix, isMentioned, sendMessage, @@ -30,7 +29,7 @@ export class OpenAIBot extends LlmsBase { constructor (payments: BotPayments, subagents?: SubagentBase[]) { super(payments, 'OpenAIBot', 'chatGpt', subagents) - this.gpt4oPrefix = this.modelManager.getPrefixByModel(this.modelsEnum.GPT_4O) ?? [] + // this.gpt4oPrefix = this.modelManager.getPrefixByModel(this.modelsEnum.GPT_4O) ?? [] if (!config.openAi.dalle.isEnabled) { this.logger.warn('DALLΒ·E 2 Image Bot is disabled in config') } @@ -49,7 +48,7 @@ export class OpenAIBot extends LlmsBase { public isSupportedEvent ( ctx: OnMessageContext | OnCallBackQueryData ): boolean { - const commands = ['last', ...this.modelManager.getCommandsByProvider('openai')] + const commands = ['last', ...this.supportedCommands] const hasCommand = ctx.hasCommand(commands) if (ctx.hasCommand(SupportedCommands.new) && this.checkModel(ctx)) { return true @@ -90,7 +89,8 @@ export class OpenAIBot extends LlmsBase { hasPrefix (prompt: string): string { return ( - hasCommandPrefix(prompt, this.gpt4oPrefix) || hasNewPrefix(prompt) // hasDallePrefix(prompt) + this.supportedPrefixes.find(prefix => prompt.toLocaleLowerCase().startsWith(prefix)) ?? + hasNewPrefix(prompt) // hasDallePrefix(prompt) ) } @@ -106,19 +106,9 @@ export class OpenAIBot extends LlmsBase { return } - if ( - ctx.hasCommand([ - this.commandsEnum.CHAT, - this.commandsEnum.ASK, - this.commandsEnum.GPT, - this.commandsEnum.GPTO - ]) || - hasCommandPrefix(ctx.message?.text ?? '', this.gpt4oPrefix) || - isMentioned(ctx) || - ((ctx.message?.text?.startsWith('chat ') ?? + if ((ctx.message?.text?.startsWith('chat ') ?? 
ctx.message?.text?.startsWith('ask ')) && - ctx.chat?.type === 'private') - ) { + ctx.chat?.type === 'private') { this.updateSessionModel(ctx, this.modelsEnum.GPT_4O) await this.onChat(ctx, this.modelsEnum.GPT_4O, true, false) return @@ -140,30 +130,36 @@ export class OpenAIBot extends LlmsBase { return } - if (ctx.hasCommand(this.commandsEnum.ASK35)) { - this.updateSessionModel(ctx, this.modelsEnum.GPT_35_TURBO) - await this.onChat(ctx, this.modelsEnum.GPT_35_TURBO, true, false) - return - } + // if (ctx.hasCommand(this.commandsEnum.ASK35)) { + // this.updateSessionModel(ctx, this.modelsEnum.GPT_35_TURBO) + // await this.onChat(ctx, this.modelsEnum.GPT_35_TURBO, true, false) + // return + // } + + // if (ctx.hasCommand(this.commandsEnum.GPT4)) { + // this.updateSessionModel(ctx, this.modelsEnum.GPT_4) + // await this.onChat(ctx, this.modelsEnum.GPT_4, true, false) + // return + // } + + // if (ctx.hasCommand([this.commandsEnum.O1, this.commandsEnum.ASK1])) { + // this.updateSessionModel(ctx, this.modelsEnum.O1) + // await this.onChat(ctx, this.modelsEnum.O1, false, false) + // return + // } - if (ctx.hasCommand(this.commandsEnum.GPT4)) { - this.updateSessionModel(ctx, this.modelsEnum.GPT_4) - await this.onChat(ctx, this.modelsEnum.GPT_4, true, false) + const model = this.getModelFromContext(ctx) + if (model) { + this.updateSessionModel(ctx, model.version) + await this.onChat(ctx, model.version, this.getStreamOption(model.version), false) return } - // if (ctx.hasCommand(this.commandsEnum.ASK32)) { // this.updateSessionModel(ctx, this.modelsEnum.GPT_4_32K) // await this.onChat(ctx, this.modelsEnum.GPT_4_32K, true, false) // return // } - if (ctx.hasCommand([this.commandsEnum.O1, this.commandsEnum.ASK1])) { - this.updateSessionModel(ctx, this.modelsEnum.O1) - await this.onChat(ctx, this.modelsEnum.O1, false, false) - return - } - if (ctx.hasCommand(SupportedCommands.last)) { await this.onLast(ctx) return diff --git a/src/modules/llms/utils/llmModelsManager.ts b/src/modules/llms/utils/llmModelsManager.ts index 2d277c33..f1cddf84 100644 --- a/src/modules/llms/utils/llmModelsManager.ts +++ b/src/modules/llms/utils/llmModelsManager.ts @@ -13,7 +13,6 @@ export class LLMModelsManager { constructor (llmData: LLMData) { this.loadModels(llmData) - console.log(this.models) this.modelsEnum = this.createModelsEnum() this.commandsEnum = this.createCommandsEnum() } @@ -130,12 +129,6 @@ export class LLMModelsManager { return this.models.get(version)?.prefix } - getModelByCommand (command: string): LLMModel | undefined { - return Array.from(this.models.values()).find(model => - model.commands.includes(command) - ) - } - generateTelegramOutput (): string { let output = '' const providers = Array.from(new Set(this.getAllModels().map(model => model.provider))) diff --git a/src/modules/llms/utils/llmsData.ts b/src/modules/llms/utils/llmsData.ts index c089ddf3..4b5a4f60 100644 --- a/src/modules/llms/utils/llmsData.ts +++ b/src/modules/llms/utils/llmsData.ts @@ -2,18 +2,6 @@ import { type LLMData } from './types' export const llmData: LLMData = { chatModels: { - // 'chat-bison': { - // provider: 'vertex', - // name: 'chat-bison', - // fullName: 'chat-bison', - // version: 'chat-bison', - // commands: ['bison', 'b'], - // apiSpec: 'https://example.com/chat-bison-api-spec', - // inputPrice: 0.03, - // outputPrice: 0.06, - // maxContextTokens: 8192, - // chargeType: 'CHAR' - // }, 'gemini-10': { provider: 'vertex', name: 'gemini-10', @@ -26,7 +14,8 @@ export const llmData: LLMData = { inputPrice: 0.000125, 
outputPrice: 0.000375, maxContextTokens: 30720, - chargeType: 'CHAR' + chargeType: 'CHAR', + stream: true }, 'gemini-15': { provider: 'vertex', @@ -39,20 +28,9 @@ export const llmData: LLMData = { inputPrice: 0.0025, outputPrice: 0.0075, maxContextTokens: 1048576, - chargeType: 'CHAR' + chargeType: 'CHAR', + stream: true }, - // 'j2-ultra': { - // provider: 'jurassic', - // name: 'j2_Ultra', - // fullName: 'j2-ultra', - // version: 'j2-ultra', - // commands: ['j2ultra'], - // apiSpec: 'https://example.com/j2-ultra-api-spec', - // inputPrice: 0.06, - // outputPrice: 0.12, - // maxContextTokens: 32000, - // chargeType: 'TOKEN' - // }, 'claude-3-opus': { provider: 'claude', name: 'claude-3-opus', @@ -65,7 +43,8 @@ export const llmData: LLMData = { inputPrice: 0.015, outputPrice: 0.075, maxContextTokens: 4096, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, 'claude-35-sonnet': { provider: 'claude', @@ -74,11 +53,13 @@ export const llmData: LLMData = { botName: 'ClaudeBot', version: 'claude-3-5-sonnet-20240620', commands: ['sonnet', 'claudes', 's', 'stool'], + prefix: ['s. '], apiSpec: 'https://www.anthropic.com/news/claude-3-5-sonnet', inputPrice: 0.003, outputPrice: 0.015, maxContextTokens: 8192, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, 'claude-3-haiku': { provider: 'claude', @@ -87,11 +68,13 @@ export const llmData: LLMData = { botName: 'ClaudeBot', version: 'claude-3-haiku-20240307', commands: ['haiku', 'h'], + prefix: ['h. '], apiSpec: 'https://www.anthropic.com/news/claude-3-family', inputPrice: 0.00025, outputPrice: 0.00125, maxContextTokens: 4096, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, 'gpt-4': { provider: 'openai', @@ -104,20 +87,9 @@ export const llmData: LLMData = { inputPrice: 0.03, outputPrice: 0.06, maxContextTokens: 8192, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, - // 'gpt-4-32k': { - // provider: 'openai', - // name: 'gpt-4-32k', - // fullName: 'GPT-4 32k', - // version: 'gpt-4-32k', - // commands: ['gpt4-32k', 'ask32'], - // apiSpec: 'https://example.com/gpt-4-32k-api-spec', - // inputPrice: 0.06, - // outputPrice: 0.12, - // maxContextTokens: 32000, - // chargeType: 'TOKEN' - // }, 'gpt-35-turbo': { provider: 'openai', name: 'gpt-35-turbo', @@ -129,31 +101,23 @@ export const llmData: LLMData = { inputPrice: 0.0015, outputPrice: 0.002, maxContextTokens: 4000, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, - // 'gpt-35-turbo-16k': { - // provider: 'openai', - // name: 'GPT-3.5 Turbo 16k', - // version: 'gpt-3.5-turbo-16k', - // commands: ['gpt35-16k'], - // apiSpec: 'https://example.com/gpt-3.5-turbo-16k-api-spec', - // inputPrice: 0.003, - // outputPrice: 0.004, - // maxContextTokens: 16000, - // chargeType: 'TOKEN' - // }, 'gpt-4-vision': { provider: 'openai', name: 'gpt-4-vision', fullName: 'GPT-4 Vision', botName: 'OpenAIBot', version: 'gpt-4-vision-preview', - commands: ['vision'], + commands: ['vision', 'v'], + prefix: ['v. '], apiSpec: 'https://platform.openai.com/docs/guides/vision', inputPrice: 0.03, outputPrice: 0.06, maxContextTokens: 16000, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, 'gpt-4o': { provider: 'openai', @@ -161,13 +125,14 @@ export const llmData: LLMData = { fullName: 'GPT-4o', botName: 'OpenAIBot', version: 'gpt-4o', - commands: ['gpto', 'ask', 'chat', 'gpt'], + commands: ['gpto', 'ask', 'chat', 'gpt', 'a'], prefix: ['a. ', '. 
'], apiSpec: 'https://platform.openai.com/docs/models/gpt-4o', inputPrice: 0.005, outputPrice: 0.0015, maxContextTokens: 128000, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: true }, o1: { provider: 'openai', @@ -176,11 +141,13 @@ botName: 'OpenAIBot', version: 'o1-preview', commands: ['o1', 'ask1'], + prefix: ['o1. '], apiSpec: 'https://platform.openai.com/docs/models/o1', inputPrice: 0.015, outputPrice: 0.06, maxContextTokens: 128000, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: false }, 'o1-mini': { provider: 'openai', @@ -193,7 +160,8 @@ inputPrice: 0.003, outputPrice: 0.012, maxContextTokens: 128000, - chargeType: 'TOKEN' + chargeType: 'TOKEN', + stream: false } }, imageModels: { diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts index 36a59e07..58561d56 100644 --- a/src/modules/llms/utils/types.ts +++ b/src/modules/llms/utils/types.ts @@ -20,6 +20,7 @@ export interface ChatModel extends BaseModel { outputPrice: number maxContextTokens: number chargeType: ChargeType + stream: boolean } export interface ImageModel extends BaseModel {
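With stream now required on ChatModel, every chat entry in llmsData must declare whether its completions can be streamed; llmsBase.getStreamOption reads this flag when dispatching. A hypothetical entry showing the full shape (every value below is made up for illustration):

```ts
import { type ChatModel } from './types'

const exampleModel: ChatModel = {
  provider: 'openai',
  name: 'example-chat',
  fullName: 'Example Chat Model',
  botName: 'OpenAIBot',
  version: 'example-chat-2024',
  commands: ['example'],
  prefix: ['ex. '],
  apiSpec: 'https://example.com/api-spec',
  inputPrice: 0.001,
  outputPrice: 0.002,
  maxContextTokens: 8192,
  chargeType: 'TOKEN',
  stream: true // false (as for the o1 models above) selects the non-streaming path
}
```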
diff --git a/src/modules/llms/vertexBot.ts b/src/modules/llms/vertexBot.ts index 0c8c375d..384b79f4 100644 --- a/src/modules/llms/vertexBot.ts +++ b/src/modules/llms/vertexBot.ts @@ -5,7 +5,6 @@ import { type ChatConversation } from '../types' import { - hasCommandPrefix, isMentioned, SupportedCommands } from './utils/helpers' @@ -14,16 +13,11 @@ import { type LlmCompletion } from './api/llmApi' import { LlmsBase } from './llmsBase' import { vertexCompletion, vertexStreamCompletion } from './api/vertex' import { type SubagentBase } from '../subagents' -import { - LlmModelsEnum, - type ModelVersion -} from './utils/llmModelsManager' +import { type ModelVersion } from './utils/llmModelsManager' export class VertexBot extends LlmsBase { - private readonly geminiPrefix: string[] constructor (payments: BotPayments, subagents?: SubagentBase[]) { super(payments, 'VertexBot', 'llms', subagents) - this.geminiPrefix = this.modelManager.getPrefixByModel(LlmModelsEnum.GEMINI_10) ?? [] } public getEstimatedPrice (ctx: any): number { @@ -33,11 +27,7 @@ public isSupportedEvent ( ctx: OnMessageContext | OnCallBackQueryData ): boolean { - const hasCommand = ctx.hasCommand([ - this.commandsEnum.GEMINI, - this.commandsEnum.G, - this.commandsEnum.G15, - this.commandsEnum.GEMINI15]) + const hasCommand = ctx.hasCommand(this.supportedCommands) if (isMentioned(ctx)) { return true } @@ -69,12 +59,6 @@ return await vertexCompletion(conversation, model) } - hasPrefix (prompt: string): string { - return ( - hasCommandPrefix(prompt, this.geminiPrefix) - ) - } - public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> { ctx.transient.analytics.module = this.module const isSupportedEvent = this.isSupportedEvent(ctx) @@ -82,24 +66,18 @@ this.logger.warn(`### unsupported command ${ctx.message?.text}`) return } - // if (ctx.hasCommand([SupportedCommands.bard, SupportedCommands.bardF]) || hasBardPrefix(ctx.message?.text ?? '')) { - // this.updateSessionModel(ctx, LlmsModelsEnum.BISON) - // await this.onChat(ctx, LlmsModelsEnum.BISON, false, false) - // return - // } - if (ctx.hasCommand([this.commandsEnum.GEMINI, this.commandsEnum.G]) || (hasCommandPrefix(ctx.message?.text ?? '', this.geminiPrefix))) { - this.updateSessionModel(ctx, LlmModelsEnum.GEMINI_10) - await this.onChat(ctx, LlmModelsEnum.GEMINI_10, true, false) - return - } - if (ctx.hasCommand([this.commandsEnum.GEMINI15, this.commandsEnum.G15])) { - this.updateSessionModel(ctx, LlmModelsEnum.GEMINI_15) - await this.onChat(ctx, LlmModelsEnum.GEMINI_15, true, false) - // return - } if (ctx.hasCommand([SupportedCommands.pdf, SupportedCommands.ctx]) && this.checkModel(ctx)) { await this.onChat(ctx, ctx.session.currentModel, true, false) } + + const model = this.getModelFromContext(ctx) + if (!model) { + this.logger.warn(`### unsupported model for command ${ctx.message?.text}`) + return + } + this.updateSessionModel(ctx, model.version) + + await this.onChat(ctx, model.version, this.getStreamOption(model.version), false) } } diff --git a/src/modules/types.ts b/src/modules/types.ts index efe12154..877a80c0 100644 --- a/src/modules/types.ts +++ b/src/modules/types.ts @@ -163,6 +163,12 @@ export interface SubagentSessionData { isProcessingQueue: boolean subagentsRequestQueue: SubagentResult[] } + +interface VoiceMemoSessionData { + isOneTimeForwardingVoiceEnabled: boolean + isVoiceForwardingEnabled: boolean +} + export interface BotSessionData { oneCountry: OneCountryData collections: CollectionSessionData @@ -172,6 +178,8 @@ subagents: SubagentSessionData dalle: ImageGenSessionData currentModel: ModelVersion + lastBroadcast: string + voiceMemo: VoiceMemoSessionData } export interface TransientStateContext { diff --git a/src/modules/voice-memo/index.ts b/src/modules/voice-memo/index.ts index ed508672..b5765d19 100644 --- a/src/modules/voice-memo/index.ts +++ b/src/modules/voice-memo/index.ts @@ -14,12 +14,18 @@ import { InputFile } from 'grammy' import { bot } from '../../bot' import * as Sentry from '@sentry/node' import { now } from '../../utils/perf' +import { isAdmin } from '../llms/utils/context' +import { VOICE_MEMO_FORWARDING } from '../../constants' interface TranslationJob { filePath: string publicFileUrl: string } +enum SupportedCommands { + FORWARD = 'forward' +} + export class VoiceMemo implements PayableBot { public readonly module = 'VoiceMemo' private readonly logger: Logger @@ -141,8 +147,7 @@ public isSupportedEvent (ctx: OnMessageContext): boolean { const { voice, audio } = ctx.update.message - - return config.voiceMemo.isEnabled && (!!voice || !!audio) + return ctx.hasCommand(Object.values(SupportedCommands)) || (config.voiceMemo.isEnabled && (!!voice || !!audio)) } public getEstimatedPrice (ctx: OnMessageContext): number { @@ -159,10 +164,31 @@ const fileSize = (voice ?? audio)?.file_size const requestKey = `${from.id}_${fileSize}` + if (ctx.hasCommand(SupportedCommands.FORWARD)) { + if (await isAdmin(ctx)) { + ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled = true + this.logger.info('/forward command') + await ctx.reply(VOICE_MEMO_FORWARDING.enabled, { + link_preview_options: { is_disabled: true }, + message_thread_id: ctx.message?.message_thread_id + }) + return + } + await ctx.reply(VOICE_MEMO_FORWARDING.restricted, { + link_preview_options: { is_disabled: true }, + message_thread_id: ctx.message?.message_thread_id + }) + return + } + this.requestsQueue.set(requestKey, Date.now()) this.logger.info(`onEvent message @${from.username} (${from.id}): ${requestKey}`) + if (ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled) { + ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled = false + } + let translationJob for (let i = 0; i < 30 * 60; i++) {
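Pieced together with the config.ts, helpers.ts, and bot.ts changes, the one-shot forwarding flow reads like this (a paraphrase of the code in this diff, not new behavior):

```ts
// 1. An admin sends /forward → the one-time flag is armed:
ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled = true

// 2. The onMessage gate in bot.ts now lets one *forwarded* voice/audio
//    message through, because isVoiceForwardingEnabled ||
//    isOneTimeForwardingVoiceEnabled holds.

// 3. VoiceMemo.onEvent handles that message and disarms the flag:
if (ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled) {
  ctx.session.voiceMemo.isOneTimeForwardingVoiceEnabled = false
}

// The persistent variant is seeded from VOICE_MEMO_FORWARDING_ENABLED via
// config.voiceMemo.isVoiceForwardingEnabled and copied into each new session.
```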
diff --git a/src/pages.ts b/src/pages.ts index 7906a5f4..70f403a0 100644 --- a/src/pages.ts +++ b/src/pages.ts @@ -3,7 +3,7 @@ import { chatMainMenu } from './modules/llms/menu/openaiMenu' import { type BotContext } from './modules/types' import { sdImagesMenu } from './modules/sd-images/menu' import { voiceMemoMenu } from './modules/voice-memo/menu' -import { MenuIds, commandsHelpText, menuText } from './constants' +import { MENU_URL_BUTTONS, MenuIds, commandsHelpText, menuText } from './constants' import { BotPayments } from './modules/payment' import { TelegramPayments } from './modules/telegram_payment' @@ -23,6 +23,21 @@ export const getStartMenuText = async (ctx: BotContext): Promise<string> => { return startText } +export const privateChatMainMenu = new Menu<BotContext>(MenuIds.PRIVATE_MAIN_MENU) + .text('πŸ’³ /buy', async (ctx) => { + await telegramPayments.createPaymentInvoice(ctx) + }) + +export const groupsMainMenu = new Menu<BotContext>(MenuIds.MAIN_MENU) + .text('πŸ’³ /buy', async (ctx) => { + await telegramPayments.createPaymentInvoice(ctx) + }) + +for (const button of MENU_URL_BUTTONS) { + privateChatMainMenu.webApp(button.text, button.url) + groupsMainMenu.url(button.text, button.url) +} + export const mainMenu = new Menu<BotContext>(MenuIds.MAIN_MENU) .text('πŸ’³ /buy', async (ctx) => { await telegramPayments.createPaymentInvoice(ctx)