diff --git a/src/bot.ts b/src/bot.ts
index abc0a0f..407611e 100644
--- a/src/bot.ts
+++ b/src/bot.ts
@@ -24,8 +24,8 @@ import {
 import { mainMenu } from './pages'
 import { TranslateBot } from './modules/translate/TranslateBot'
 import { VoiceMemo } from './modules/voice-memo'
-import { QRCodeBot } from './modules/qrcode/QRCodeBot'
-import { SDImagesBot } from './modules/sd-images'
+// import { QRCodeBot } from './modules/qrcode/QRCodeBot'
+// import { SDImagesBot } from './modules/sd-images'
 import { OpenAIBot } from './modules/open-ai'
 import { OneCountryBot } from './modules/1country'
 import { WalletConnect } from './modules/walletconnect'
@@ -242,8 +242,8 @@ bot.use(autoChatAction())
 bot.use(mainMenu)
 
 const voiceMemo = new VoiceMemo()
-const qrCodeBot = new QRCodeBot()
-const sdImagesBot = new SDImagesBot()
+// const qrCodeBot = new QRCodeBot()
+// const sdImagesBot = new SDImagesBot()
 const walletConnect = new WalletConnect()
 const payments = new BotPayments()
 const schedule = new BotSchedule(bot)
@@ -364,8 +364,8 @@ const writeCommandLog = async (
 
 const PayableBots: Record<string, PayableBotConfig> = {
   voiceCommand: { bot: voiceCommand },
-  qrCodeBot: { bot: qrCodeBot },
-  sdImagesBot: { bot: sdImagesBot },
+  // qrCodeBot: { bot: qrCodeBot },
+  // sdImagesBot: { bot: sdImagesBot },
   voiceTranslate: { bot: voiceTranslateBot },
   voiceMemo: { bot: voiceMemo },
   translateBot: { bot: translateBot },
@@ -448,24 +448,24 @@ const onMessage = async (ctx: OnMessageContext): Promise<void> => {
 
 const onCallback = async (ctx: OnCallBackQueryData): Promise<void> => {
   try {
-    if (qrCodeBot.isSupportedEvent(ctx)) {
-      await qrCodeBot.onEvent(ctx, (reason) => {
-        logger.error(`qr generate error: ${reason}`)
-      })
-      return
-    }
+    // if (qrCodeBot.isSupportedEvent(ctx)) {
+    //   await qrCodeBot.onEvent(ctx, (reason) => {
+    //     logger.error(`qr generate error: ${reason}`)
+    //   })
+    //   return
+    // }
 
     if (telegramPayments.isSupportedEvent(ctx)) {
       await telegramPayments.onEvent(ctx)
      return
     }
 
-    if (sdImagesBot.isSupportedEvent(ctx)) {
-      await sdImagesBot.onEvent(ctx, (e) => {
-        logger.info(e, '// TODO refund payment')
-      })
-      return
-    }
+    // if (sdImagesBot.isSupportedEvent(ctx)) {
+    //   await sdImagesBot.onEvent(ctx, (e) => {
+    //     logger.info(e, '// TODO refund payment')
+    //   })
+    //   return
+    // }
 
     if (openAiBot.isSupportedEvent(ctx)) {
       await openAiBot.onEvent(ctx, (e) => {
diff --git a/src/config.ts b/src/config.ts
index 34eaae3..2615383 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -33,7 +33,7 @@ export default {
     ? parseInt(process.env.SESSION_TIMEOUT)
     : 48, // in hours
   llms: {
-    apiEndpoint: process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000', // process.env.LLMS_ENDPOINT, //
+    apiEndpoint: process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
    wordLimit: 50,
    model: 'chat-bison',
    minimumBalance: 0,
diff --git a/src/constants.ts b/src/constants.ts
index da8bdf2..77285df 100644
--- a/src/constants.ts
+++ b/src/constants.ts
@@ -40,12 +40,33 @@
 Your credits in 1Bot Credits: $CREDITS
 
 Send ONE to: \`$WALLET_ADDRESS\`
 `,
-  more: `/ explain like i am 5, what is a superconductor?
-. explain like i have a phd, what is category theory?
+  // more: `/ explain like i am 5, what is a superconductor?
+  // . explain like i have a phd, what is category theory?
+
+  // /images vintage hot rod with custom flame paint job
+
+  // /qr s.country/ai astronaut, exuberant, anime girl, smile, sky, colorful
+
+  // /connect (Wallet Connect to MetaMask / Gnosis Safe / Timeless)
+
+  // /send TARGET-WALLET-ADDRESS ONE-TOKEN-AMOUNT
+  // /send 0x742c4788CC47A94cf260abc474E2Fa45695a79Cd 42
 
-/images vintage hot rod with custom flame paint job
+  // /memo (Send voice messages via microphone button on bottom right)
 
-/qr s.country/ai astronaut, exuberant, anime girl, smile, sky, colorful
+  // ❤️‍🔥 [Join our team](https://xn--qv9h.s.country/p/dear-engineer-our-tech-lead-role) to build [AI ∩ Crypto](https://xn--qv9h.s.country/p/radically-fair-economy-for-1country)! [Product roadmap](https://xn--qv9h.s.country/p/generating-roadmap-as-ceo-vs-cto):
+
+  // [🧠 Web∞](https://xn--qv9h.s.country/p/learning-machine-cryptography): CivitAI custom models (low-rank adaptations, clothes & accessories, human poses, comics & brand characters, video-to-video transformations), Character.AI celebrity chats, RunwayML video clips, HuggingFace embedding ControlNet, Meta segment anything, ElevenLabs speech clones, Zapier task automations, document or website queries.
+
+  // [🌳 Web3](https://xn--qv9h.s.country/p/telegram-bots-and-clients-self-custody): self-custody wallets, token swaps, cross-chain bridges, fiat onramps, lending yields, collectible mints, price auctions, multi-signature safes, governance votes, portfolio management, .1 name services.
+
+  // [🍏 Web2](https://xn--qv9h.s.country/p/new-forum-for-ai-crypto-reddit-discourse): news curation, gated access, emoji tipping, collective moments, group discount, social commerce.
+
+  // [🏴‍☠️ Web1](https://xn--qv9h.s.country/p/controlnet-lora-1country-qr-code): .country domains, email aliases, vanity URLs, Notion/Substack hosting.
+  // `
+  // }
+  more: `/ explain like i am 5, what is a superconductor?
+. explain like i have a phd, what is category theory?
 /connect (Wallet Connect to MetaMask / Gnosis Safe / Timeless)
diff --git a/src/modules/llms/api/athropic.ts b/src/modules/llms/api/athropic.ts
new file mode 100644
index 0000000..9eccd2c
--- /dev/null
+++ b/src/modules/llms/api/athropic.ts
@@ -0,0 +1,153 @@
+import axios, { type AxiosResponse } from 'axios'
+import { type Readable } from 'stream'
+import { GrammyError } from 'grammy'
+import { pino } from 'pino'
+
+import config from '../../../config'
+import { type OnCallBackQueryData, type OnMessageContext, type ChatConversation } from '../../types'
+import { type LlmCompletion } from './llmApi'
+import { LlmsModelsEnum } from '../types'
+
+const logger = pino({
+  name: 'anthropic - llmsBot',
+  transport: {
+    target: 'pino-pretty',
+    options: { colorize: true }
+  }
+})
+
+const API_ENDPOINT = config.llms.apiEndpoint // 'http://127.0.0.1:5000'
+
+export const anthropicCompletion = async (
+  conversation: ChatConversation[],
+  model = LlmsModelsEnum.CLAUDE_OPUS
+): Promise<LlmCompletion> => {
+  logger.info(`Handling ${model} completion`)
+
+  const data = {
+    model,
+    stream: false,
+    system: config.openAi.chatGpt.chatCompletionContext,
+    max_tokens: +config.openAi.chatGpt.maxTokens,
+    messages: conversation
+  }
+  const url = `${API_ENDPOINT}/anthropic/completions`
+  const response = await axios.post(url, data)
+  const respJson = JSON.parse(response.data)
+  if (response) {
+    const totalInputTokens = respJson.usage.input_tokens
+    const totalOutputTokens = respJson.usage.output_tokens
+    const completion = respJson.content
+
+    return {
+      completion: {
+        content: completion[0].text,
+        role: 'assistant',
+        model
+      },
+      usage: totalOutputTokens + totalInputTokens,
+      price: 0
+    }
+  }
+  return {
+    completion: undefined,
+    usage: 0,
+    price: 0
+  }
+}
+
+export const anthropicStreamCompletion = async (
+  conversation: ChatConversation[],
+  model = LlmsModelsEnum.CLAUDE_OPUS,
+  ctx: OnMessageContext | OnCallBackQueryData,
+  msgId: number,
+  limitTokens = true
+): Promise<LlmCompletion> => {
+  const data = {
+    model,
+    stream: true, // Set stream to true to receive the completion as a stream
+    system: config.openAi.chatGpt.chatCompletionContext,
+    max_tokens: limitTokens ? +config.openAi.chatGpt.maxTokens : undefined,
+    messages: conversation.map(m => { return { content: m.content, role: m.role } })
+  }
+  let wordCount = 0
+  let wordCountMinimum = 2
+  const url = `${API_ENDPOINT}/anthropic/completions`
+  if (!ctx.chat?.id) {
+    throw new Error('Context chat id should not be empty before Anthropic streaming')
+  }
+  const response: AxiosResponse = await axios.post(url, data, { responseType: 'stream' })
+  // Create a Readable stream from the response
+  const completionStream: Readable = response.data
+  // Read and process the stream
+  let completion = ''
+  let outputTokens = ''
+  let inputTokens = ''
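+  // The LLMs API server is assumed to frame the Anthropic stream as plain-text
+  // chunks: 'Input Token: <n>' once up front, 'Text: <delta>' for each content
+  // delta, and 'Output Tokens: <n>' at the end. The loop below parses that
+  // framing rather than Anthropic's native SSE events.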
+  for await (const chunk of completionStream) {
+    const msg = chunk.toString()
+    if (msg) {
+      if (msg.startsWith('Input Token')) {
+        inputTokens = msg.split('Input Token: ')[1]
+      } else if (msg.startsWith('Text')) {
+        wordCount++
+        completion += msg.split('Text: ')[1]
+        if (wordCount > wordCountMinimum) { // if (chunk === '.' && wordCount > wordCountMinimum) {
+          if (wordCountMinimum < 64) {
+            wordCountMinimum *= 2
+          }
+          completion = completion.replaceAll('...', '')
+          completion += '...'
+          wordCount = 0
+          if (ctx.chat?.id) {
+            await ctx.api
+              .editMessageText(ctx.chat?.id, msgId, completion)
+              .catch(async (e: any) => {
+                if (e instanceof GrammyError) {
+                  if (e.error_code !== 400) {
+                    throw e
+                  } else {
+                    logger.error(e)
+                  }
+                } else {
+                  throw e
+                }
+              })
+          }
+        }
+      } else if (msg.startsWith('Output Tokens')) {
+        outputTokens = msg.split('Output Tokens: ')[1]
+      }
+    }
+  }
+  completion = completion.replaceAll('...', '')
+  await ctx.api
+    .editMessageText(ctx.chat?.id, msgId, completion)
+    .catch((e: any) => {
+      if (e instanceof GrammyError) {
+        if (e.error_code !== 400) {
+          throw e
+        } else {
+          logger.error(e)
+        }
+      } else {
+        throw e
+      }
+    })
+  const totalOutputTokens = outputTokens // response.headers['x-openai-output-tokens']
+  const totalInputTokens = inputTokens // response.headers['x-openai-input-tokens']
+  return {
+    completion: {
+      content: completion,
+      role: 'assistant',
+      model
+    },
+    usage: parseInt(totalOutputTokens, 10) + parseInt(totalInputTokens, 10),
+    price: 0,
+    inputTokens: parseInt(totalInputTokens, 10),
+    outputTokens: parseInt(totalOutputTokens, 10)
+  }
+}
diff --git a/src/modules/llms/api/llmApi.ts b/src/modules/llms/api/llmApi.ts
index 2348a93..0169495 100644
--- a/src/modules/llms/api/llmApi.ts
+++ b/src/modules/llms/api/llmApi.ts
@@ -2,6 +2,8 @@ import axios from 'axios'
 import config from '../../../config'
 import { type ChatConversation } from '../../types'
 import pino from 'pino'
+import { LlmsModels, LlmsModelsEnum } from '../types'
+import { type ChatModel } from '../../open-ai/types'
 
 const API_ENDPOINT = config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://localhost:8080' // http://127.0.0.1:5000' // config.llms.apiEndpoint
@@ -17,6 +19,8 @@ export interface LlmCompletion {
   completion: ChatConversation | undefined
   usage: number
   price: number
+  inputTokens?: number
+  outputTokens?: number
 }
 
 interface LlmAddUrlDocument {
@@ -32,6 +36,10 @@ interface QueryUrlDocument {
   conversation?: ChatConversation[]
 }
 
+export const getChatModel = (modelName: string): ChatModel => {
+  return LlmsModels[modelName]
+}
+
 export const llmAddUrlDocument = async (args: LlmAddUrlDocument): Promise<string> => {
   const data = { ...args }
   const endpointUrl = `${API_ENDPOINT}/collections/document`
@@ -86,7 +94,7 @@ export const deleteCollection = async (collectionName: string): Promise<void> =>
 
 export const llmCompletion = async (
   conversation: ChatConversation[],
-  model = config.llms.model
+  model = LlmsModelsEnum.BISON
 ): Promise<LlmCompletion> => {
   const data = {
     model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
diff --git a/src/modules/llms/helpers.ts b/src/modules/llms/helpers.ts
index 0ea4e4a..9d82dd7 100644
--- a/src/modules/llms/helpers.ts
+++ b/src/modules/llms/helpers.ts
@@ -8,10 +8,20 @@ import {
 import { type ParseMode } from 'grammy/types'
 import { LlmsModelsEnum } from './types'
 import { type Message } from 'grammy/out/types'
-import { llmAddUrlDocument } from './api/llmApi'
+import { type LlmCompletion, getChatModel, llmAddUrlDocument } from './api/llmApi'
+import { getChatModelPrice } from '../open-ai/api/openAi'
+import config from '../../config'
 
 export enum SupportedCommands {
   bardF = 'bard',
+  claudeOpus = 'claude',
+  opus = 'opus',
+  claudeSonnet = 'claudes',
+  opusShort = 'c',
+  sonnet = 'sonnet',
+  sonnetShort = 's',
+  claudeHaiku = 'haiku',
+  haikuShort = 'h',
   bard = 'b',
   j2Ultra = 'j2-ultra',
   sum = 'sum',
@@ -22,6 +32,9 @@ export enum SupportedCommands {
 export const MAX_TRIES = 3
 const LLAMA_PREFIX_LIST = ['* ']
 const BARD_PREFIX_LIST = ['b. ', 'B. ']
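+// A 'c. ' prefix routes a plain message to Claude Opus without a slash
+// command, mirroring the 'b. ' shortcut for Bard above.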
+const CLAUDE_OPUS_PREFIX_LIST = ['c. ']
 
 export const isMentioned = (
   ctx: OnMessageContext | OnCallBackQueryData
@@ -59,6 +72,16 @@ export const hasBardPrefix = (prompt: string): string => {
   return ''
 }
 
+export const hasClaudeOpusPrefix = (prompt: string): string => {
+  const prefixList = CLAUDE_OPUS_PREFIX_LIST
+  for (let i = 0; i < prefixList.length; i++) {
+    if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
+      return prefixList[i]
+    }
+  }
+  return ''
+}
+
 export const hasUrl = (
   ctx: OnMessageContext | OnCallBackQueryData,
   prompt: string
@@ -190,15 +213,26 @@ export const sendMessage = async (
 
 export const hasPrefix = (prompt: string): string => {
   return (
-    hasBardPrefix(prompt) || hasLlamaPrefix(prompt)
+    hasBardPrefix(prompt) || hasLlamaPrefix(prompt) || hasClaudeOpusPrefix(prompt)
   )
 }
 
-export const getPromptPrice = (completion: string, data: ChatPayload): { price: number, promptTokens: number, completionTokens: number } => {
+export const getPromptPrice = (completion: LlmCompletion, data: ChatPayload): { price: number, promptTokens: number, completionTokens: number } => {
+  const { ctx, model } = data
+  const modelPrice = getChatModel(model)
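+  // Assumed pricing flow: getChatModelPrice applies the model's per-1K-token
+  // input/output rates to the token counts reported with the completion, and
+  // config.openAi.chatGpt.priceAdjustment scales the result before it is
+  // accumulated on the session for billing.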
+  const price =
+    getChatModelPrice(modelPrice, true, completion.inputTokens ?? 0, completion.outputTokens ?? 0) *
+    config.openAi.chatGpt.priceAdjustment
+  ctx.session.llms.usage += completion.outputTokens ?? 0
+  ctx.session.llms.price += price
   return {
-    price: 0,
-    promptTokens: 10,
-    completionTokens: 60
+    price,
+    promptTokens: completion.inputTokens ?? 0,
+    completionTokens: completion.outputTokens ?? 0
   }
 }
diff --git a/src/modules/llms/index.ts b/src/modules/llms/index.ts
index f8d8bda..c6dd8bc 100644
--- a/src/modules/llms/index.ts
+++ b/src/modules/llms/index.ts
@@ -19,7 +19,9 @@ import { sleep } from '../sd-images/utils'
 import {
   addDocToCollection,
   addUrlToCollection,
+  getPromptPrice,
   hasBardPrefix,
+  hasClaudeOpusPrefix,
   hasLlamaPrefix,
   hasPrefix,
   hasUrl,
@@ -37,6 +39,7 @@ import * as Sentry from '@sentry/node'
 import { now } from '../../utils/perf'
 import { AxiosError } from 'axios'
 import OpenAI from 'openai'
+import { anthropicCompletion, anthropicStreamCompletion } from './api/athropic'
 
 export class LlmsBot implements PayableBot {
   public readonly module = 'LlmsBot'
   private readonly logger: Logger
@@ -122,6 +125,18 @@ export class LlmsBot implements PayableBot {
       return
     }
+    if (ctx.hasCommand([SupportedCommands.claudeOpus, SupportedCommands.opus, SupportedCommands.opusShort]) || (hasClaudeOpusPrefix(ctx.message?.text ?? '') !== '')) {
+      await this.onChat(ctx, LlmsModelsEnum.CLAUDE_OPUS)
+      return
+    }
+    if (ctx.hasCommand([SupportedCommands.claudeSonnet, SupportedCommands.sonnet, SupportedCommands.sonnetShort])) {
+      await this.onChat(ctx, LlmsModelsEnum.CLAUDE_SONNET)
+      return
+    }
+    if (ctx.hasCommand([SupportedCommands.claudeHaiku, SupportedCommands.haikuShort])) {
+      await this.onChat(ctx, LlmsModelsEnum.CLAUDE_HAIKU)
+      return
+    }
     if (ctx.hasCommand(SupportedCommands.bard) || ctx.hasCommand(SupportedCommands.bardF)) {
       await this.onChat(ctx, LlmsModelsEnum.BISON)
       return
     }
@@ -530,6 +545,72 @@ export class LlmsBot implements PayableBot {
     ctx.transient.analytics.actualResponseTime = now()
   }
 
+  private async completionGen (data: ChatPayload, msgId?: number, outputFormat = 'text'): Promise<{ price: number, chat: ChatConversation[] }> {
+    const { conversation, ctx, model } = data
+    try {
+      if (!msgId) {
+        ctx.transient.analytics.firstResponseTime = now()
+        msgId = (
+          await ctx.reply('...', {
+            message_thread_id:
+              ctx.message?.message_thread_id ??
+              ctx.message?.reply_to_message?.message_thread_id
+          })
+        ).message_id
+      }
+      if (outputFormat === 'text') {
+        const isTypingEnabled = config.openAi.chatGpt.isTypingEnabled
+        if (isTypingEnabled) {
+          ctx.chatAction = 'typing'
+        }
+        const completion = await anthropicStreamCompletion(
+          conversation,
+          model as LlmsModelsEnum,
+          ctx,
+          msgId,
+          true // Telegram messages have a character limit
+        )
+        if (isTypingEnabled) {
+          ctx.chatAction = null
+        }
+        if (completion) {
+          ctx.transient.analytics.sessionState = RequestState.Success
+          ctx.transient.analytics.actualResponseTime = now()
+          const price = getPromptPrice(completion, data)
+          this.logger.info(
+            `streamChatCompletion result = tokens: ${price.promptTokens + price.completionTokens} | ${model} | price: ${price.price}¢`
+          )
+          conversation.push({
+            role: 'assistant',
+            content: completion.completion?.content ?? ''
+          })
+          return {
+            price: price.price,
+            chat: conversation
+          }
+        }
+      } else {
+        const response = await anthropicCompletion(conversation, model as LlmsModelsEnum)
+        conversation.push({
+          role: 'assistant',
+          content: response.completion?.content ?? ''
+        })
+        return {
+          price: response.price,
+          chat: conversation
+        }
+      }
+      return {
+        price: 0,
+        chat: conversation
+      }
+    } catch (e: any) {
+      Sentry.captureException(e)
+      ctx.chatAction = null
+      throw e
+    }
+  }
+
   private async promptGen (data: ChatPayload): Promise<{ price: number, chat: ChatConversation[] }> {
     const { conversation, ctx, model } = data
     if (!ctx.chat?.id) {
@@ -547,8 +632,10 @@ export class LlmsBot implements PayableBot {
     const chat = prepareConversation(conversation, model)
     if (model === LlmsModelsEnum.BISON) {
       response = await vertexCompletion(chat, model) // "chat-bison@001");
+    } else if (model === LlmsModelsEnum.CLAUDE_OPUS || model === LlmsModelsEnum.CLAUDE_SONNET || model === LlmsModelsEnum.CLAUDE_HAIKU) {
+      response = await anthropicCompletion(chat, model)
     } else {
-      response = await llmCompletion(chat, model)
+      response = await llmCompletion(chat, model as LlmsModelsEnum)
     }
     if (response.completion) {
       await ctx.api.editMessageText(
@@ -568,7 +655,7 @@ export class LlmsBot implements PayableBot {
         chat: conversation
       }
     }
-    ctx.chatAction = null
+    // ctx.chatAction = null
     ctx.transient.analytics.actualResponseTime = now()
     return {
       price: 0,
@@ -666,7 +749,14 @@ export class LlmsBot implements PayableBot {
       model: model ?? config.llms.model,
       ctx
     }
-    const result = await this.promptGen(payload)
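+    // Opus and Sonnet stream their replies through completionGen; all other
+    // models, including Claude Haiku, take the non-streaming promptGen path.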
+    let result: { price: number, chat: ChatConversation[] } = { price: 0, chat: [] }
+    if (model === LlmsModelsEnum.CLAUDE_OPUS || model === LlmsModelsEnum.CLAUDE_SONNET) {
+      result = await this.completionGen(payload) // , prompt.msgId, prompt.outputFormat)
+    } else {
+      result = await this.promptGen(payload)
+    }
     ctx.session.llms.chatConversation = [...result.chat]
     if (
       !(await this.payments.pay(ctx as OnMessageContext, result.price))
@@ -678,6 +768,7 @@ export class LlmsBot implements PayableBot {
         await this.onNotBalanceMessage(ctx)
       }
     } catch (e: any) {
+      ctx.session.llms.chatConversation = []
       await this.onError(ctx, e)
     }
   }
@@ -722,6 +813,7 @@ export class LlmsBot implements PayableBot {
     ctx.transient.analytics.sessionState = RequestState.Error
     Sentry.setContext('llms', { retryCount, msg })
     Sentry.captureException(e)
+    ctx.chatAction = null
     if (retryCount === 0) {
       // Retry limit reached, log an error or take alternative action
       this.logger.error(`Retry limit reached for error: ${e}`)
diff --git a/src/modules/llms/types.ts b/src/modules/llms/types.ts
index 1e12c90..54bb775 100644
--- a/src/modules/llms/types.ts
+++ b/src/modules/llms/types.ts
@@ -3,7 +3,10 @@ import { type ChatModel } from '../open-ai/types'
 export enum LlmsModelsEnum {
   GPT_4_32K = 'gpt-4-32k',
   BISON = 'chat-bison',
-  J2_ULTRA = 'j2-ultra'
+  J2_ULTRA = 'j2-ultra',
+  CLAUDE_OPUS = 'claude-3-opus-20240229',
+  CLAUDE_SONNET = 'claude-3-sonnet-20240229',
+  CLAUDE_HAIKU = 'claude-3-haiku-20240307'
 }
 
 export const LlmsModels: Record<string, ChatModel> = {
@@ -16,8 +19,8 @@ export const LlmsModels: Record<string, ChatModel> = {
   },
   'gpt-4-32k': {
     name: 'gpt-4-32k',
-    inputPrice: 0.06,
-    outputPrice: 0.12,
+    inputPrice: 0.06, // 6¢ per 1K tokens
+    outputPrice: 0.12, // 12¢ per 1K tokens
     maxContextTokens: 32000,
     chargeType: 'TOKEN'
   },
@@ -27,5 +30,28 @@
     outputPrice: 0.12,
     maxContextTokens: 32000,
     chargeType: 'TOKEN'
+  },
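+  // Anthropic prices below are per 1K tokens, converted from the published
+  // per-1M-token rates noted on each entry.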
+  'claude-3-opus-20240229': {
+    name: 'claude-3-opus-20240229',
+    inputPrice: 0.015, // 15.00 (1M Tokens) => 0.015 (1K tokens)
+    outputPrice: 0.075,
+    maxContextTokens: 4096,
+    chargeType: 'TOKEN'
+  },
+  'claude-3-sonnet-20240229': {
+    name: 'claude-3-sonnet-20240229',
+    inputPrice: 0.003, // 3.00 (1M Tokens) => 0.003 (1K tokens)
+    outputPrice: 0.015,
+    maxContextTokens: 4096,
+    chargeType: 'TOKEN'
+  },
+  'claude-3-haiku-20240307': {
+    name: 'claude-3-haiku-20240307',
+    inputPrice: 0.00025, // 0.25 (1M Tokens) => 0.00025 (1K tokens)
+    outputPrice: 0.00125,
+    maxContextTokens: 4096,
+    chargeType: 'TOKEN'
   }
 }
diff --git a/src/modules/open-ai/index.ts b/src/modules/open-ai/index.ts
index 6ba115f..572ac4f 100644
--- a/src/modules/open-ai/index.ts
+++ b/src/modules/open-ai/index.ts
@@ -963,6 +963,7 @@ export class OpenAIBot implements PayableBot {
     ctx.transient.analytics.sessionState = RequestState.Error
     Sentry.setContext('open-ai', { retryCount, msg })
     Sentry.captureException(ex)
+    ctx.chatAction = null
     if (retryCount === 0) {
       // Retry limit reached, log an error or take alternative action
       this.logger.error(`Retry limit reached for error: ${ex}`)
diff --git a/src/modules/open-ai/utils/text.ts b/src/modules/open-ai/utils/text.ts
index 94c21ee..cf2aa3e 100644
--- a/src/modules/open-ai/utils/text.ts
+++ b/src/modules/open-ai/utils/text.ts
@@ -10,7 +10,7 @@ Adjust image size or how many images are generated`,
   endChat: 'Thanks for using 1.country services',
   gptChatPaymentIssue: 'Once the withdrawal instructions are completed, you can return to the current conversation by writing the */ask* command.',
   notEnoughBalance:
-    'Your credits: $CREDITS ONE tokens. To recharge, send to `$WALLET_ADDRESS`'
+    'Your credits: $CREDITS ONE tokens. To recharge, send ONE to `$WALLET_ADDRESS`'
 }
 
 // Edit an Image
diff --git a/src/modules/voice-command/index.ts b/src/modules/voice-command/index.ts
index fa11f6d..7332ca5 100644
--- a/src/modules/voice-command/index.ts
+++ b/src/modules/voice-command/index.ts
@@ -115,7 +115,7 @@ export class VoiceCommand implements PayableBot {
       await this.openAIBot.voiceCommand(ctx, command, resultText)
       await ctx.api.deleteMessage(ctx.chat.id, progressMessage.message_id)
     } else {
-      await ctx.api.editMessageText(ctx.chat.id, progressMessage.message_id, `No command detected. This is what I heard 😉: _${resultText}_`, { parse_mode: 'Markdown' })
+      await ctx.api.editMessageText(ctx.chat.id, progressMessage.message_id, `No command detected. This is what I heard: _${resultText}_`, { parse_mode: 'Markdown' })
     }
   }
 }