diff --git a/package-lock.json b/package-lock.json
index 44711d8..254df1d 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -2093,7 +2093,7 @@
       "version": "0.8.1",
       "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
       "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
-      "dev": true,
+      "devOptional": true,
       "dependencies": {
         "@jridgewell/trace-mapping": "0.3.9"
       },
@@ -3684,7 +3684,7 @@
       "version": "3.1.1",
       "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz",
       "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==",
-      "dev": true,
+      "devOptional": true,
       "engines": {
         "node": ">=6.0.0"
       }
@@ -3702,13 +3702,13 @@
       "version": "1.4.15",
       "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
       "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
-      "dev": true
+      "devOptional": true
     },
     "node_modules/@jridgewell/trace-mapping": {
       "version": "0.3.9",
       "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz",
       "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==",
-      "dev": true,
+      "devOptional": true,
       "dependencies": {
         "@jridgewell/resolve-uri": "^3.0.3",
         "@jridgewell/sourcemap-codec": "^1.4.10"
@@ -4222,25 +4222,25 @@
       "version": "1.0.9",
       "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz",
       "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==",
-      "dev": true
+      "devOptional": true
     },
     "node_modules/@tsconfig/node12": {
       "version": "1.0.11",
       "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz",
       "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==",
-      "dev": true
+      "devOptional": true
     },
     "node_modules/@tsconfig/node14": {
       "version": "1.0.3",
       "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz",
       "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==",
-      "dev": true
+      "devOptional": true
    },
     "node_modules/@tsconfig/node16": {
       "version": "1.0.4",
       "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
       "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==",
-      "dev": true
+      "devOptional": true
     },
     "node_modules/@types/babel__core": {
       "version": "7.20.1",
@@ -5230,7 +5230,7 @@
       "version": "8.9.0",
       "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.9.0.tgz",
       "integrity": "sha512-jaVNAFBHNLXspO543WnNNPZFRtavh3skAkITqD0/2aeMkKZTN+254PyhwxFYrk3vQ1xfY+2wbesJMs/JC8/PwQ==",
-      "dev": true,
+      "devOptional": true,
       "bin": {
         "acorn": "bin/acorn"
       },
@@ -5251,7 +5251,7 @@
       "version": "8.2.0",
       "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz",
       "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==",
-      "dev": true,
+      "devOptional": true,
       "engines": {
         "node": ">=0.4.0"
       }
@@ -5406,7 +5406,7 @@
       "version": "4.1.3",
       "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz",
       "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==",
-      "dev": true
+      "devOptional": true
     },
     "node_modules/argparse": {
"version": "2.0.1", @@ -7046,7 +7046,7 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true + "devOptional": true }, "node_modules/cross-fetch": { "version": "3.1.8", @@ -7387,7 +7387,7 @@ "version": "4.0.2", "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", - "dev": true, + "devOptional": true, "engines": { "node": ">=0.3.1" } @@ -11236,7 +11236,7 @@ "version": "1.3.6", "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", - "dev": true + "devOptional": true }, "node_modules/make-fetch-happen": { "version": "11.1.1", @@ -14748,7 +14748,7 @@ "version": "10.9.1", "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", - "dev": true, + "devOptional": true, "dependencies": { "@cspotcode/source-map-support": "^0.8.0", "@tsconfig/node10": "^1.0.7", @@ -15267,7 +15267,7 @@ "version": "5.2.2", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", - "dev": true, + "devOptional": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -15503,7 +15503,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", - "dev": true + "devOptional": true }, "node_modules/v8-to-istanbul": { "version": "9.1.0", @@ -16526,7 +16526,7 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", - "dev": true, + "devOptional": true, "engines": { "node": ">=6" } diff --git a/src/bot.ts b/src/bot.ts index 1676391..cbc5282 100644 --- a/src/bot.ts +++ b/src/bot.ts @@ -31,8 +31,6 @@ import { OneCountryBot } from './modules/1country' import { WalletConnect } from './modules/walletconnect' import { BotPayments } from './modules/payment' import { BotSchedule } from './modules/schedule' -import { LlmsBot } from './modules/llms' -import { DocumentHandler } from './modules/document-handler' import config from './config' import { commandsHelpText, FEEDBACK, LOVE, MODELS, SUPPORT, TERMS, LANG } from './constants' import prometheusRegister, { PrometheusMetrics } from './metrics/prometheus' @@ -43,7 +41,6 @@ import { autoRetry } from '@grammyjs/auto-retry' import { run } from '@grammyjs/runner' import { runBotHeartBit } from './monitoring/monitoring' import { type BotPaymentLog } from './database/stats.service' -// import { getChatMemberInfo } from './modules/open-ai/utils/web-crawler' import { TelegramPayments } from './modules/telegram_payment' import * as Sentry from '@sentry/node' import * as Events from 'events' @@ -209,7 +206,9 @@ function createInitialSessionData (): BotSessionData { collections: { activeCollections: [], collectionRequestQueue: [], - isProcessingQueue: false + isProcessingQueue: false, + currentCollection: '', 
+      collectionConversation: []
     },
     llms: {
       model: config.llms.model,
@@ -244,8 +243,6 @@ const schedule = new BotSchedule(bot)
 const openAiBot = new OpenAIBot(payments)
 const oneCountryBot = new OneCountryBot(payments)
 const translateBot = new TranslateBot()
-const llmsBot = new LlmsBot(payments)
-const documentBot = new DocumentHandler()
 const telegramPayments = new TelegramPayments(payments)
 const voiceTranslateBot = new VoiceTranslateBot(payments)
 const textToSpeechBot = new TextToSpeechBot(payments)
@@ -361,7 +358,6 @@ const PayableBots: Record<string, PayableBotConfig> = {
   sdImagesBot: { bot: sdImagesBot },
   voiceTranslate: { bot: voiceTranslateBot },
   voiceMemo: { bot: voiceMemo },
-  documentBot: { bot: documentBot },
   translateBot: { bot: translateBot },
   textToSpeech: { bot: textToSpeechBot },
   voiceToText: { bot: voiceToTextBot },
@@ -369,10 +365,6 @@ const PayableBots: Record<string, PayableBotConfig> = {
     enabled: (ctx: OnMessageContext) => ctx.session.openAi.imageGen.isEnabled,
     bot: openAiBot
   },
-  llmsBot: {
-    enabled: (ctx: OnMessageContext) => ctx.session.openAi.imageGen.isEnabled,
-    bot: llmsBot
-  },
   oneCountryBot: { bot: oneCountryBot }
 }
diff --git a/src/config.ts b/src/config.ts
index aa8c0bf..c4f291e 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -33,7 +33,7 @@ export default {
     ? parseInt(process.env.SESSION_TIMEOUT)
     : 48, // in hours
   llms: {
-    apiEndpoint: process.env.LLMS_ENDPOINT ?? '',
+    apiEndpoint: process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
     wordLimit: 50,
     model: 'chat-bison',
     minimumBalance: 0,
diff --git a/src/modules/1country/index.ts b/src/modules/1country/index.ts
index 78ebfb4..29b8bb4 100644
--- a/src/modules/1country/index.ts
+++ b/src/modules/1country/index.ts
@@ -16,6 +16,7 @@ import { MAX_TRIES, sendMessage } from '../open-ai/helpers'
 import { sleep } from '../sd-images/utils'
 import { isValidUrl } from '../open-ai/utils/web-crawler'
 import { now } from '../../utils/perf'
+import OpenAI from 'openai'
 
 export const SupportedCommands = {
   register: { name: 'rent' },
@@ -525,6 +526,14 @@
     return input.replace(/[^a-z0-9-]/g, '').toLowerCase()
   }
 
+  async onEnd (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    ctx.session.collections.activeCollections = []
+    ctx.session.collections.collectionConversation = []
+    ctx.session.collections.collectionRequestQueue = []
+    ctx.session.collections.currentCollection = ''
+    ctx.session.collections.isProcessingQueue = false
+  }
+
   async onError (
     ctx: OnMessageContext | OnCallBackQueryData,
     ex: any,
@@ -573,6 +582,21 @@
         `On method "${ex.method}" | ${ex.error_code} - ${ex.description}`
       )
     }
+  } else if (ex instanceof OpenAI.APIError) {
+    // 429 RateLimitError
+    // e.status = 400 || e.code = BadRequestError
+    this.logger.error(`OPENAI Error ${ex.status}(${ex.code}) - ${ex.message}`)
+    if (ex.code === 'context_length_exceeded') {
+      await sendMessage(ctx, ex.message).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
+      ctx.transient.analytics.actualResponseTime = now()
+      await this.onEnd(ctx)
+    } else {
+      await sendMessage(
+        ctx,
+        'Error accessing OpenAI (ChatGPT). Please try later'
+      ).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
+      ctx.transient.analytics.actualResponseTime = now()
+    }
   } else {
     this.logger.error(`${ex.toString()}`)
     await sendMessage(ctx, 'Error handling your request')
diff --git a/src/modules/document-handler/index.ts b/src/modules/document-handler/index.ts
index 73a33ae..e4e80bc 100644
--- a/src/modules/document-handler/index.ts
+++ b/src/modules/document-handler/index.ts
@@ -2,11 +2,29 @@ import { type OnMessageContext, type PayableBot, type RefundCallback, RequestSta
 import * as Sentry from '@sentry/node'
 import { now } from '../../utils/perf'
 import { llmAddUrlDocument } from '../llms/api/llmApi'
+import { type Logger, pino } from 'pino'
+import { GrammyError } from 'grammy'
+import { sendMessage } from '../open-ai/helpers'
+import { sleep } from '../sd-images/utils'
+import { AxiosError } from 'axios'
 
-const SupportedDocuments = { PDF: 'application/pdf' }
+// const SupportedDocuments = { PDF: 'application/pdf' }
+const MAX_TRIES = 3
 
 export class DocumentHandler implements PayableBot {
   public readonly module = 'DocumentHandler'
+  private readonly logger: Logger
+
+  constructor () {
+    this.logger = pino({
+      name: 'OpenAIBot',
+      transport: {
+        target: 'pino-pretty',
+        options: { colorize: true }
+      }
+    })
+  }
+
   public getEstimatedPrice (ctx: OnMessageContext): number {
     return 1
   }
@@ -17,11 +35,11 @@
       const file = await ctx.getFile()
       const documentType = ctx.message.document?.mime_type
       if (documentType === 'application/pdf' && ctx.chat.id) {
-        const pdfUrl = file.getUrl()
+        const url = file.getUrl()
         const fileName = ctx.message.document?.file_name ?? file.file_id
-        await this.addDocToCollection(ctx, ctx.chat.id, fileName, pdfUrl)
+        const prompt = ctx.message.caption ?? ''
+        await this.addDocToCollection(ctx, ctx.chat.id, fileName, url, prompt)
       }
-      console.log(file)
       ctx.transient.analytics.sessionState = RequestState.Success
     } catch (ex) {
       Sentry.captureException(ex)
@@ -32,26 +50,92 @@
   }
 
   public isSupportedEvent (ctx: OnMessageContext): boolean {
-    const documentType = ctx.message.document?.mime_type
+    // const documentType = ctx.message.document?.mime_type
 
-    if (documentType !== undefined) {
-      return Object.values(SupportedDocuments).includes(documentType)
-    }
+    // if (documentType !== undefined) {
+    //   return Object.values(SupportedDocuments).includes(documentType)
+    // }
     return false
   }
 
-  private async addDocToCollection (ctx: OnMessageContext, chatId: number, fileName: string, pdfUrl: string): Promise<void> {
-    const collectionName = await llmAddUrlDocument({
-      chatId,
-      pdfUrl,
-      fileName
-    })
-    ctx.session.collections.collectionRequestQueue.push({
-      collectionName,
-      collectionType: 'PDF',
-      fileName,
-      url: pdfUrl
-    })
-    ctx.session.collections.isProcessingQueue = true
+  private async addDocToCollection (ctx: OnMessageContext, chatId: number, fileName: string, url: string, prompt: string): Promise<void> {
+    try {
+      const collectionName = await llmAddUrlDocument({
+        chatId,
+        url,
+        fileName
+      })
+      ctx.session.collections.collectionRequestQueue.push({
+        collectionName,
+        collectionType: 'PDF',
+        fileName,
+        url,
+        prompt
+      })
+    } catch (e: any) {
+      await this.onError(ctx, e)
+    }
+  }
+
+  async onError (
+    ctx: OnMessageContext,
+    ex: any,
+    retryCount: number = MAX_TRIES,
+    msg?: string
+  ): Promise<void> {
+    ctx.transient.analytics.sessionState = RequestState.Error
+    Sentry.setContext('open-ai', { retryCount, msg })
+    Sentry.captureException(ex)
+    if (retryCount === 0) {
+      // Retry limit reached, log an error or take alternative action
+      this.logger.error(`Retry limit reached for error: ${ex}`)
+      return
+    }
+    if (ex instanceof GrammyError) {
+      if (ex.error_code === 400 && ex.description.includes('not enough rights')) {
+        await sendMessage(
+          ctx,
+          'Error: The bot does not have permission to send photos in chat'
+        )
+        ctx.transient.analytics.actualResponseTime = now()
+      } else if (ex.error_code === 429) {
+        const retryAfter = ex.parameters.retry_after
+          ? ex.parameters.retry_after < 60
+            ? 60
+            : ex.parameters.retry_after * 2
+          : 60
+        const method = ex.method
+        const errorMessage = `On method "${method}" | ${ex.error_code} - ${ex.description}`
+        this.logger.error(errorMessage)
+        await sendMessage(
+          ctx,
+          `${
+            ctx.from.username ? ctx.from.username : ''
+          } Bot has reached limit, wait ${retryAfter} seconds`
+        ).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
+        ctx.transient.analytics.actualResponseTime = now()
+        if (method === 'editMessageText') {
+          ctx.session.openAi.chatGpt.chatConversation.pop() // deletes last prompt
+        }
+        await sleep(retryAfter * 1000) // wait retryAfter seconds to enable bot
+      } else {
+        this.logger.error(
+          `On method "${ex.method}" | ${ex.error_code} - ${ex.description}`
+        )
+        await sendMessage(ctx, 'Error handling your request').catch(async (e) => {
+          await this.onError(ctx, e, retryCount - 1)
+        })
+      }
+    } else if (ex instanceof AxiosError) {
+      await sendMessage(ctx, 'Error handling your request').catch(async (e) => {
+        await this.onError(ctx, e, retryCount - 1)
+      })
+    } else {
+      this.logger.error(`${ex.toString()}`)
+      await sendMessage(ctx, 'Error handling your request')
+        .catch(async (e) => { await this.onError(ctx, e, retryCount - 1) }
+        )
+      ctx.transient.analytics.actualResponseTime = now()
+    }
   }
 }
diff --git a/src/modules/llms/api/llmApi.ts b/src/modules/llms/api/llmApi.ts
index a3123f3..d59ee72 100644
--- a/src/modules/llms/api/llmApi.ts
+++ b/src/modules/llms/api/llmApi.ts
@@ -1,8 +1,8 @@
-import axios, { AxiosError } from 'axios'
+import axios from 'axios'
 import config from '../../../config'
 import { type ChatConversation } from '../../types'
 
-const API_ENDPOINT = 'http://127.0.0.1:5000' // config.llms.apiEndpoint
+const API_ENDPOINT = config.llms.apiEndpoint // 'http://localhost:8080' // http://127.0.0.1:5000' // config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://127.0.0.1:5000'
 
 export interface LlmCompletion {
   completion: ChatConversation | undefined
   usage: number
   price: number
 }
@@ -20,45 +20,27 @@ interface LlmAddUrlDocument {
   chatId: number
   url?: string
   fileName?: string
 }
 
 interface QueryUrlDocument {
   collectioName: string
   prompt: string
-  conversation?: ChatConversation
+  conversation?: ChatConversation[]
 }
 
 export const llmAddUrlDocument = async (args: LlmAddUrlDocument): Promise<string> => {
-  try {
-    const data = { ...args }
-    const endpointUrl = `${API_ENDPOINT}/collections/document`
-    const response = await axios.post(endpointUrl, data)
-    if (response) {
-      return response.data
-    }
-    return ''
-  } catch (error: any) {
-    if (error instanceof AxiosError) {
-      console.log(error.code)
-      console.log(error.message)
-      console.log(error.stack)
-    }
-    throw error
+  const data = { ...args }
+  const endpointUrl = `${API_ENDPOINT}/collections/document`
+  const response = await axios.post(endpointUrl, data)
+  if (response) {
+    return response.data.collectionName
   }
+  return ''
 }
 
-export const llmCheckCollectionStatus = async (collectionName: string): Promise<number> => {
-  try {
-    const data = { collectionName }
-    const endpointUrl = `${API_ENDPOINT}/collections/document`
-    const response = await axios.get(endpointUrl, { params: data })
-    if (response) {
-      return response.data.price
-    }
-    return -1
-  } catch (error: any) {
-    if (error instanceof AxiosError) {
-      console.log(error.code)
-      console.log(error.message)
-      console.log(error.stack)
-    }
-    throw error
+export const llmCheckCollectionStatus = async (name: string): Promise<number> => {
+  const endpointUrl = `${API_ENDPOINT}/collections/document/${name}` // ?collectionName=${collectionName}`
+  console.log(endpointUrl)
+  const response = await axios.get(endpointUrl)
+  if (response) {
+    return response.data.price
   }
+  return -1
 }
 
 interface QueryUrlDocumentOutput {
   completion: string
   price: number
 }
 
 export const queryUrlDocument = async (args: QueryUrlDocument): Promise<QueryUrlDocumentOutput> => {
-  try {
-    const data = { collectionName: args.collectioName, prompt: args.prompt }
-    const endpointUrl = `${API_ENDPOINT}/collections/query`
-    const response = await axios.post(endpointUrl, data)
-    if (response) {
-      return response.data
-    }
-    return {
-      completion: '',
-      price: 0
-    }
-  } catch (error: any) {
-    if (error instanceof AxiosError) {
-      console.log(error.code)
-      console.log(error.message)
-      console.log(error.stack)
-    }
-    throw error
+  const data = { collectionName: args.collectioName, prompt: args.prompt, conversation: args.conversation }
+  console.log(data.conversation)
+  const endpointUrl = `${API_ENDPOINT}/collections/query`
+  const response = await axios.post(endpointUrl, data)
+  if (response) {
+    return response.data
+  }
+  return {
+    completion: '',
+    price: 0
   }
 }
@@ -93,66 +67,22 @@ export const llmCompletion = async (
   model = config.llms.model
 ): Promise<LlmCompletion> => {
   // eslint-disable-next-line no-useless-catch
-  try {
-    const data = {
-      model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
-      stream: false,
-      messages: conversation
-    }
-    const url = `${API_ENDPOINT}/llms/completions`
-    const response = await axios.post(url, data)
-
-    if (response) {
-      const totalInputTokens = response.data.usage.prompt_tokens
-      const totalOutputTokens = response.data.usage.completion_tokens
-      const completion = response.data.choices
-
-      return {
-        completion: {
-          content: completion[0].message?.content,
-          role: 'system',
-          model
-        },
-        usage: totalOutputTokens + totalInputTokens,
-        price: 0
-      }
-    }
-    return {
-      completion: undefined,
-      usage: 0,
-      price: 0
-    }
-  } catch (error: any) {
-    throw error
-  }
-}
-
-export const llmWebCrawler = async (
-  prompt: string,
-  model: string,
-  chadId: number,
-  msgId: number,
-  url: string
-): Promise<LlmCompletion> => {
-  if (!url.startsWith('https://')) {
-    url = `https://${url}`
-  }
   const data = {
-    prompt,
-    chatId: '' + chadId,
-    msgId: '' + msgId,
-    token: '' + config.telegramBotAuthToken,
-    url
+    model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
+    stream: false,
+    messages: conversation
   }
-  const urlApi = `${API_ENDPOINT}/llama-index/text`
-  const response = await axios.post(urlApi, data)
-  if (response.data) {
-    const totalInputTokens = 0 // response.data.usage.prompt_tokens
-    const totalOutputTokens = 0 // response.data.usage.completion_tokens
-    const completion = response.data
+  const url = `${API_ENDPOINT}/llms/completions`
+  const response = await axios.post(url, data)
+
+  if (response) {
+    const totalInputTokens = response.data.usage.prompt_tokens
+    const totalOutputTokens = response.data.usage.completion_tokens
+    const completion = response.data.choices
+
     return {
       completion: {
-        content: completion ?? '',
+        content: completion[0].message?.content,
         role: 'system',
         model
       },
diff --git a/src/modules/llms/api/vertex.ts b/src/modules/llms/api/vertex.ts
index 6c12474..d63a87d 100644
--- a/src/modules/llms/api/vertex.ts
+++ b/src/modules/llms/api/vertex.ts
@@ -1,47 +1,37 @@
-import axios, { AxiosError } from 'axios'
+import axios from 'axios'
 import config from '../../../config'
 import { type ChatConversation } from '../../types'
 import { type LlmCompletion } from './llmApi'
 
-// const API_ENDPOINT = 'http://127.0.0.1:5000' // config.llms.apiEndpoint
-const API_ENDPOINT = config.llms.apiEndpoint
+const API_ENDPOINT = config.llms.apiEndpoint // http://localhost:8080' // config.llms.apiEndpoint
 
 export const vertexCompletion = async (
   conversation: ChatConversation[],
   model = config.llms.model
 ): Promise<LlmCompletion> => {
-  try {
-    const data = {
-      model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
-      stream: false,
-      messages: conversation
-    }
-    const url = `${API_ENDPOINT}/vertex/completions`
-    const response = await axios.post(url, data)
-    if (response) {
-      const totalInputTokens = 4 // response.data.usage.prompt_tokens;
-      const totalOutputTokens = 5 // response.data.usage.completion_tokens;
-      return {
-        completion: {
-          content: response.data._prediction_response[0][0].candidates[0].content,
-          author: 'bot',
-          model
-        },
-        usage: totalOutputTokens + totalInputTokens,
-        price: 0
-      }
-    }
+  const data = {
+    model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
+    stream: false,
+    messages: conversation
+  }
+  const url = `${API_ENDPOINT}/vertex/completions`
+  const response = await axios.post(url, data)
+  if (response) {
+    const totalInputTokens = 4 // response.data.usage.prompt_tokens;
+    const totalOutputTokens = 5 // response.data.usage.completion_tokens;
     return {
-      completion: undefined,
-      usage: 0,
+      completion: {
+        content: response.data._prediction_response[0][0].candidates[0].content,
+        author: 'bot',
+        model
+      },
+      usage: totalOutputTokens + totalInputTokens,
       price: 0
     }
-  } catch (error: any) {
-    if (error instanceof AxiosError) {
-      console.log(error.code)
-      console.log(error.message)
-      console.log(error.stack)
-    }
-    throw error
+  }
+  return {
+    completion: undefined,
+    usage: 0,
+    price: 0
   }
 }
diff --git a/src/modules/llms/helpers.ts b/src/modules/llms/helpers.ts
index 3101dbd..a7c9595 100644
--- a/src/modules/llms/helpers.ts
+++ b/src/modules/llms/helpers.ts
@@ -10,12 +10,14 @@ import { type ParseMode } from 'grammy/types'
 // import { getChatModel, getChatModelPrice, getTokenNumber } from "./api/openAi";
 import { LlmsModelsEnum } from './types'
 import { type Message } from 'grammy/out/types'
+import { llmAddUrlDocument } from './api/llmApi'
 
 export const SupportedCommands = {
   bardF: { name: 'bard' },
   bard: { name: 'b' },
   pdf: { name: 'pdf' },
-  j2Ultra: { name: 'j2-ultra' }
+  j2Ultra: { name: 'j2-ultra' },
+  sum: { name: 'sum' }
 }
 
 export const MAX_TRIES = 3
@@ -236,3 +238,38 @@
     return msgFiltered
   })
 }
+
+export async function addUrlToCollection (ctx: OnMessageContext | OnCallBackQueryData, chatId: number, url: string, prompt: string): Promise<void> {
+  const collectionName = await llmAddUrlDocument({
+    chatId,
+    url
+  })
+  const msgId = (await ctx.reply('...', {
+    message_thread_id:
+      ctx.message?.message_thread_id ??
+      ctx.message?.reply_to_message?.message_thread_id
+  })).message_id
+
+  ctx.session.collections.collectionRequestQueue.push({
+    collectionName,
+    collectionType: 'URL',
+    url: url.toLocaleLowerCase(),
+    prompt,
+    msgId
+  })
+}
+
+export async function addDocToCollection (ctx: OnMessageContext | OnCallBackQueryData, chatId: number, fileName: string, url: string, prompt: string): Promise<void> {
+  const collectionName = await llmAddUrlDocument({
+    chatId,
+    url,
+    fileName
+  })
+  ctx.session.collections.collectionRequestQueue.push({
+    collectionName,
+    collectionType: 'PDF',
+    fileName,
+    url: url.toLocaleLowerCase(),
+    prompt
+  })
+}
diff --git a/src/modules/llms/index.ts b/src/modules/llms/index.ts
index fbf486f..8556a55 100644
--- a/src/modules/llms/index.ts
+++ b/src/modules/llms/index.ts
@@ -7,28 +7,34 @@ import {
   type OnMessageContext,
   type OnCallBackQueryData,
   type ChatConversation,
-  type ChatPayload, type PayableBot, RequestState
+  type ChatPayload,
+  type PayableBot,
+  type Collection,
+  RequestState
 } from '../types'
 import { appText } from '../open-ai/utils/text'
 import { chatService } from '../../database/services'
 import config from '../../config'
 import { sleep } from '../sd-images/utils'
 import {
+  addDocToCollection,
+  addUrlToCollection,
   hasBardPrefix,
   hasPrefix,
+  hasUrl,
   isMentioned,
   limitPrompt,
   MAX_TRIES,
   prepareConversation,
   SupportedCommands
 } from './helpers'
-import { preparePrompt, sendMessage } from '../open-ai/helpers'
+import { getUrlFromText, preparePrompt, sendMessage } from '../open-ai/helpers'
 import { vertexCompletion } from './api/vertex'
-import { type LlmCompletion, llmCompletion } from './api/llmApi'
+import { type LlmCompletion, llmCompletion, llmCheckCollectionStatus, queryUrlDocument } from './api/llmApi'
 import { LlmsModelsEnum } from './types'
 import * as Sentry from '@sentry/node'
-import { handlePdf } from './api/pdfHandler'
 import { now } from '../../utils/perf'
+import { AxiosError } from 'axios'
 
 export class LlmsBot implements PayableBot {
   public readonly module = 'LlmsBot'
   private readonly logger: Logger
@@ -47,6 +53,10 @@
     this.payments = payments
   }
 
+  public getEstimatedPrice (ctx: any): number {
+    return 0
+  }
+
   public isSupportedEvent (
     ctx: OnMessageContext | OnCallBackQueryData
   ): boolean {
@@ -57,14 +67,34 @@
       return true
     }
     const chatPrefix = hasPrefix(ctx.message?.text ?? '')
+    const hasUrl = this.isSupportedUrlReply(ctx)
+    const hasPdf = this.isSupportedPdfReply(ctx)
     if (chatPrefix !== '') {
       return true
     }
-    return hasCommand
+    return hasCommand || !!hasUrl || !!hasPdf || this.isSupportedPdfFile(ctx)
   }
 
-  public getEstimatedPrice (ctx: any): number {
-    return 0
+  isSupportedPdfReply (ctx: OnMessageContext | OnCallBackQueryData): string | undefined {
+    const documentType = ctx.message?.reply_to_message?.document?.mime_type
+    if (documentType === 'application/pdf') {
+      return ctx.message?.reply_to_message?.document?.file_name
+    }
+    return undefined
+  }
+
+  private isSupportedUrlReply (ctx: OnMessageContext | OnCallBackQueryData): string | undefined {
+    return getUrlFromText(ctx)
+  }
+
+  isSupportedPdfFile (ctx: OnMessageContext | OnCallBackQueryData): boolean {
+    const documentType = ctx.message?.document?.mime_type
+    const SupportedDocuments = { PDF: 'application/pdf' }
+
+    if (documentType !== undefined) {
+      return Object.values(SupportedDocuments).includes(documentType)
+    }
+    return false
   }
 
   public async onEvent (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
@@ -84,8 +114,18 @@
       return
     }
 
-    if (ctx.hasCommand(SupportedCommands.pdf.name)) {
-      await this.onPdfHandler(ctx)
+    if (this.isSupportedUrlReply(ctx)) {
+      await this.onUrlReplyHandler(ctx)
+      return
+    }
+
+    if (this.isSupportedPdfReply(ctx)) {
+      await this.onPdfReplyHandler(ctx)
+      return
+    }
+
+    if (this.isSupportedPdfFile(ctx)) {
+      await this.onPdfFileReceived(ctx)
       return
     }
 
@@ -94,11 +134,12 @@
       return
     }
 
-    this.logger.warn('### unsupported command')
-    ctx.transient.analytics.sessionState = RequestState.Error
-    await sendMessage(ctx, '### unsupported command').catch(async (e) => {
-      await this.onError(ctx, e, MAX_TRIES, '### unsupported command')
-    })
+    if (ctx.hasCommand(SupportedCommands.sum.name) ||
+      (ctx.message?.text?.startsWith('sum ') && ctx.chat?.type === 'private')
+    ) {
+      await this.onSum(ctx)
+      return
+    }
     ctx.transient.analytics.actualResponseTime = now()
   }
 
@@ -114,89 +155,278 @@
     )
   }
 
-  private async onPdfHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
-    if (!ctx.chat?.id) {
-      throw new Error('internal error')
+  private async onPdfFileReceived (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    try {
+      const file = await ctx.getFile()
+      const documentType = ctx.message?.document?.mime_type
+      if (documentType === 'application/pdf' && ctx.chat?.id) {
+        const url = file.getUrl()
+        const fileName = ctx.message?.document?.file_name ?? file.file_id
+        const prompt = ctx.message?.caption ?? 'Summarize this context'
+        await addDocToCollection(ctx, ctx.chat.id, fileName, url, prompt)
+        if (!ctx.session.collections.isProcessingQueue) {
+          ctx.session.collections.isProcessingQueue = true
+          await this.onCheckCollectionStatus(ctx).then(() => {
+            ctx.session.collections.isProcessingQueue = false
+          })
+        }
+      }
+      ctx.transient.analytics.sessionState = RequestState.Success
+    } catch (ex) {
+      await this.onError(ctx, ex)
+    } finally {
+      ctx.transient.analytics.actualResponseTime = now()
     }
+  }
+
+  async onPdfReplyHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    try {
+      const fileName = this.isSupportedPdfReply(ctx)
+      const prompt = ctx.message?.text ?? 'Summarize this context'
+      if (fileName !== '') {
+        const collection = ctx.session.collections.activeCollections.find(c => c.fileName === fileName)
+        if (collection) {
+          await this.queryUrlCollection(ctx, collection.url, prompt)
+        } else {
+          if (!ctx.session.collections.isProcessingQueue) {
+            ctx.session.collections.isProcessingQueue = true
+            await this.onCheckCollectionStatus(ctx).then(() => {
+              ctx.session.collections.isProcessingQueue = false
+            })
+          }
+        }
+      }
+      ctx.transient.analytics.actualResponseTime = now()
+    } catch (e: any) {
+      await this.onError(ctx, e)
+    }
+  }
+
+  async onUrlReplyHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    try {
+      const url = getUrlFromText(ctx) ?? ''
+      const prompt = ctx.message?.text ?? 'summarize'
+      const collection = ctx.session.collections.activeCollections.find(c => c.url === url)
+      const newPrompt = `${prompt}` // ${url}
+      if (!collection) {
+        if (ctx.chat?.id) {
+          await addUrlToCollection(ctx, ctx.chat?.id, url, newPrompt)
+          if (!ctx.session.collections.isProcessingQueue) {
+            ctx.session.collections.isProcessingQueue = true
+            await this.onCheckCollectionStatus(ctx).then(() => {
+              ctx.session.collections.isProcessingQueue = false
+            })
+          }
+        }
+      } else {
+        await this.queryUrlCollection(ctx, url, newPrompt)
+      }
+      ctx.transient.analytics.actualResponseTime = now()
+    } catch (e: any) {
+      await this.onError(ctx, e)
+    }
+  }
+
+  private getCollectionConversation (ctx: OnMessageContext | OnCallBackQueryData, collection: Collection): ChatConversation[] {
+    if (ctx.session.collections.currentCollection === collection.collectionName) {
+      return ctx.session.collections.collectionConversation
+    }
+    ctx.session.collections.currentCollection = collection.collectionName
+    return []
+  }
+
+  private async queryUrlCollection (ctx: OnMessageContext | OnCallBackQueryData,
+    url: string,
+    prompt: string): Promise<void> {
     try {
-      const { chatConversation } = ctx.session.llms
-      const msgId = (
-        await ctx.reply('...', { message_thread_id: ctx.message?.message_thread_id })
-      ).message_id
-      const prompt = ctx.match as string
-      const response = await handlePdf(prompt)
-      if (response.completion) {
-        await ctx.api.editMessageText(
-          ctx.chat.id,
-          msgId,
-          response.completion.content
-        ).catch(async (e: any) => { await this.onError(ctx, e) })
+      const collection = ctx.session.collections.activeCollections.find(c => c.url === url)
+      if (collection) {
+        const conversation = this.getCollectionConversation(ctx, collection)
+        const msgId = (
+          await ctx.reply('...', {
+            message_thread_id:
+              ctx.message?.message_thread_id ??
+              ctx.message?.reply_to_message?.message_thread_id
+          })
+        ).message_id
+        const response = await queryUrlDocument({
+          collectioName: collection.collectionName,
+          prompt,
+          conversation
+        })
         if (
           !(await this.payments.pay(ctx as OnMessageContext, response.price))
         ) {
           await this.onNotBalanceMessage(ctx)
-          return
+        } else {
+          conversation.push({
+            content: `${prompt} ${url}`,
+            role: 'user'
+          }, {
+            content: response.completion,
+            role: 'system'
+          })
+          await ctx.api.editMessageText(ctx.chat?.id ?? '',
+            msgId, response.completion,
+            { parse_mode: 'Markdown', disable_web_page_preview: true })
+            .catch(async (e) => { await this.onError(ctx, e) })
+          ctx.session.collections.collectionConversation = [...conversation]
+        }
+      }
+      ctx.transient.analytics.actualResponseTime = now()
+    } catch (e: any) {
+      Sentry.captureException(e)
+      ctx.transient.analytics.sessionState = RequestState.Error
+      if (e instanceof AxiosError) {
+        if (e.message.includes('404')) {
+          ctx.session.collections.activeCollections =
+            [...ctx.session.collections.activeCollections.filter(c => c.url !== url.toLocaleLowerCase())]
+          await sendMessage(ctx, 'Collection not found, please try again')
+        } else {
+          await this.onError(ctx, e)
         }
-        chatConversation.push({
-          content: prompt,
-          role: 'user'
+      } else {
+        await this.onError(ctx, e)
+      }
+    }
+  }
+
+  async onCheckCollectionStatus (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    while (ctx.session.collections.collectionRequestQueue.length > 0) {
+      try {
+        const collection = ctx.session.collections.collectionRequestQueue.shift()
+        if (collection) {
+          const price = await llmCheckCollectionStatus(collection?.collectionName ?? '')
+          if (price > 0) {
+            if (
+              !(await this.payments.pay(ctx as OnMessageContext, price))
+            ) {
+              await this.onNotBalanceMessage(ctx)
+            } else {
+              ctx.session.collections.activeCollections.push(collection)
+              if (collection.msgId) {
+                const oneFee = await this.payments.getPriceInONE(price)
+                let statusMsg
+                if (collection.collectionType === 'URL') {
+                  statusMsg = `${collection.url} processed (${this.payments.toONE(oneFee, false).toFixed(2)} ONE fee)`
+                } else {
+                  statusMsg = `${collection.fileName} processed (${this.payments.toONE(oneFee, false).toFixed(2)} ONE fee)`
+                }
+                await ctx.api.editMessageText(ctx.chat?.id ?? '',
+                  collection.msgId, statusMsg,
+                  {
+                    parse_mode: 'Markdown',
+                    disable_web_page_preview: true
+                  })
+                  .catch(async (e) => { await this.onError(ctx, e) })
+              }
+              await this.queryUrlCollection(ctx, collection.url ?? '',
+                collection.prompt ?? 'summary')
+            }
+          } else {
+            ctx.session.collections.collectionRequestQueue.push(collection)
+            if (ctx.session.collections.collectionRequestQueue.length === 1) {
+              await sleep(5000)
+            } else {
+              await sleep(2500)
+            }
+          }
+        }
+        ctx.transient.analytics.actualResponseTime = now()
+      } catch (e: any) {
+        await this.onError(ctx, e)
+      }
+    }
+  }
+
+  async onSum (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    if (this.botSuspended) {
+      ctx.transient.analytics.sessionState = RequestState.Error
+      await sendMessage(ctx, 'The bot is suspended').catch(async (e) => {
+        await this.onError(ctx, e)
+      })
+      ctx.transient.analytics.actualResponseTime = now()
+      return
+    }
+    try {
+      const { prompt } = getCommandNamePrompt(ctx, SupportedCommands)
+      const newPrompt = prompt !== '' ? prompt : 'Summarize this context'
+      const { url } = hasUrl(ctx, prompt)
+      if (url && ctx.chat?.id) {
+        await this.urlHandler(ctx, url, newPrompt)
+      } else {
+        ctx.transient.analytics.sessionState = RequestState.Error
+        await sendMessage(ctx, 'Error: Missing url').catch(async (e) => {
+          await this.onError(ctx, e)
         })
-        chatConversation.push(response.completion)
+        ctx.transient.analytics.actualResponseTime = now()
       }
     } catch (e) {
       await this.onError(ctx, e)
     }
   }
 
+  public async urlHandler (ctx: OnMessageContext | OnCallBackQueryData, url: string, prompt: string): Promise<void> {
+    const collection = ctx.session.collections.activeCollections.find(c => c.url === url)
+    if (ctx.chat?.id) {
+      if (!collection) {
+        await addUrlToCollection(ctx, ctx.chat?.id, url, prompt)
+        if (!ctx.session.collections.isProcessingQueue) {
+          ctx.session.collections.isProcessingQueue = true
+          await this.onCheckCollectionStatus(ctx).then(() => {
+            ctx.session.collections.isProcessingQueue = false
+          })
+        }
+      } else {
+        await this.queryUrlCollection(ctx, url, prompt)
+      }
+    }
+    ctx.transient.analytics.actualResponseTime = now()
+  }
+
   private async promptGen (data: ChatPayload): Promise<{ price: number, chat: ChatConversation[] }> {
     const { conversation, ctx, model } = data
     if (!ctx.chat?.id) {
       throw new Error('internal error')
     }
-    try {
-      const msgId = (
-        await ctx.reply('...', { message_thread_id: ctx.message?.message_thread_id })
-      ).message_id
-      ctx.chatAction = 'typing'
-      let response: LlmCompletion = {
-        completion: undefined,
-        usage: 0,
-        price: 0
-      }
-      const chat = prepareConversation(conversation, model)
-      if (model === LlmsModelsEnum.BISON) {
-        response = await vertexCompletion(chat, model) // "chat-bison@001");
-      } else {
-        response = await llmCompletion(chat, model)
-      }
-      if (response.completion) {
-        await ctx.api.editMessageText(
-          ctx.chat.id,
-          msgId,
-          response.completion.content
-        )
-        conversation.push(response.completion)
-        // const price = getPromptPrice(completion, data);
-        // this.logger.info(
-        //   `streamChatCompletion result = tokens: ${
-        //     price.promptTokens + price.completionTokens
-        //   } | ${model} | price: ${price}¢`
-        // );
-        return {
-          price: 0,
-          chat: conversation
-        }
-      }
-      ctx.chatAction = null
-
+    const msgId = (
+      await ctx.reply('...', { message_thread_id: ctx.message?.message_thread_id })
+    ).message_id
+    ctx.chatAction = 'typing'
+    let response: LlmCompletion = {
+      completion: undefined,
+      usage: 0,
+      price: 0
+    }
+    const chat = prepareConversation(conversation, model)
+    if (model === LlmsModelsEnum.BISON) {
+      response = await vertexCompletion(chat, model) // "chat-bison@001");
+    } else {
+      response = await llmCompletion(chat, model)
+    }
+    if (response.completion) {
+      await ctx.api.editMessageText(
+        ctx.chat.id,
+        msgId,
+        response.completion.content
+      )
+      conversation.push(response.completion)
+      // const price = getPromptPrice(completion, data);
+      // this.logger.info(
+      //   `streamChatCompletion result = tokens: ${
+      //     price.promptTokens + price.completionTokens
+      //   } | ${model} | price: ${price}¢`
+      // );
       return {
         price: 0,
         chat: conversation
       }
-    } catch (e: any) {
-      Sentry.captureException(e)
-      ctx.chatAction = null
-      throw e
+    }
+    ctx.chatAction = null
+    ctx.transient.analytics.actualResponseTime = now()
+    return {
+      price: 0,
+      chat: conversation
     }
   }
@@ -247,6 +477,7 @@
           ctx.session.llms.isProcessingQueue = false
         })
       }
+      ctx.transient.analytics.actualResponseTime = now()
     } catch (e: any) {
       await this.onError(ctx, e)
     }
@@ -375,6 +606,7 @@ export class LlmsBot implements PayableBot {
           `On method "${e.method}" | ${e.error_code} - ${e.description}`
         )
         ctx.transient.analytics.actualResponseTime = now()
+        await sendMessage(ctx, 'Error handling your request').catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
       }
     } else {
       this.logger.error(`${e.toString()}`)
diff --git a/src/modules/open-ai/helpers.ts b/src/modules/open-ai/helpers.ts
index fca4aaa..885d2e6 100644
--- a/src/modules/open-ai/helpers.ts
+++ b/src/modules/open-ai/helpers.ts
@@ -3,12 +3,12 @@ import { type OnMessageContext, type OnCallBackQueryData, type MessageExtras, ty
 import { type ParseMode } from 'grammy/types'
 import { getChatModel, getChatModelPrice, getTokenNumber } from './api/openAi'
 import { type Message, type InlineKeyboardMarkup } from 'grammy/out/types'
-import { llmAddUrlDocument } from '../llms/api/llmApi'
+// import { llmAddUrlDocument } from '../llms/api/llmApi'
 
 export const SupportedCommands = {
   chat: { name: 'chat' },
   ask: { name: 'ask' },
-  sum: { name: 'sum' },
+  // sum: { name: 'sum' },
   ask35: { name: 'ask35' },
   new: { name: 'new' },
   gpt4: { name: 'gpt4' },
@@ -244,22 +244,34 @@ export const limitPrompt = (prompt: string): string => {
   return `${prompt} in around ${config.openAi.chatGpt.wordLimit} words`
 }
 
-export async function addUrlToCollection (ctx: OnMessageContext | OnCallBackQueryData, chatId: number, url: string, prompt: string): Promise<void> {
-  const collectionName = await llmAddUrlDocument({
-    chatId,
-    url
-  })
-  const msgId = (await ctx.reply('...', {
-    message_thread_id:
-      ctx.message?.message_thread_id ??
-      ctx.message?.reply_to_message?.message_thread_id
-  })).message_id
+// export async function addUrlToCollection (ctx: OnMessageContext | OnCallBackQueryData, chatId: number, url: string, prompt: string): Promise<void> {
+//   const collectionName = await llmAddUrlDocument({
+//     chatId,
+//     url
+//   })
+//   const msgId = (await ctx.reply('...', {
+//     message_thread_id:
+//       ctx.message?.message_thread_id ??
+//       ctx.message?.reply_to_message?.message_thread_id
+//   })).message_id
 
-  ctx.session.collections.collectionRequestQueue.push({
-    collectionName,
-    collectionType: 'URL',
-    url,
-    prompt,
-    msgId
-  })
+//   ctx.session.collections.collectionRequestQueue.push({
+//     collectionName,
+//     collectionType: 'URL',
+//     url,
+//     prompt,
+//     msgId
+//   })
+// }
+
+export const getUrlFromText = (ctx: OnMessageContext | OnCallBackQueryData): string | undefined => {
+  const entities = ctx.message?.reply_to_message?.entities
+  if (entities) {
+    const urlEntity = entities.find(e => e.type === 'url')
+    if (urlEntity) {
+      const url = ctx.message?.reply_to_message?.text?.slice(urlEntity.offset, urlEntity.offset + urlEntity.length)
+      return url
+    }
+  }
+  return undefined
 }
diff --git a/src/modules/open-ai/index.ts b/src/modules/open-ai/index.ts
index f172a09..c2858c1 100644
--- a/src/modules/open-ai/index.ts
+++ b/src/modules/open-ai/index.ts
@@ -14,7 +14,6 @@ import {
 } from '../types'
 import {
   alterGeneratedImg,
-  // chatCompletion,
   getChatModel,
   getDalleModel,
   getDalleModelPrice,
@@ -33,25 +32,23 @@ import {
   hasNewPrefix,
   hasPrefix,
   hasUrl,
-  // hasUsernamePassword,
   isMentioned,
   MAX_TRIES,
   preparePrompt,
   sendMessage,
-  SupportedCommands,
-  addUrlToCollection
+  SupportedCommands
 } from './helpers'
-// import { getCrawlerPrice, getWebContent } from './utils/web-crawler'
 import * as Sentry from '@sentry/node'
 import { now } from '../../utils/perf'
 import { AxiosError } from 'axios'
-import { llmCheckCollectionStatus, queryUrlDocument } from '../llms/api/llmApi'
 import { Callbacks } from '../types'
+import { LlmsBot } from '../llms'
 
 export class OpenAIBot implements PayableBot {
   public readonly module = 'OpenAIBot'
   private readonly logger: Logger
   private readonly payments: BotPayments
+  private readonly llmsBot: LlmsBot
   private botSuspended: boolean
 
   constructor (payments: BotPayments) {
@@ -67,6 +64,7 @@
     if (!config.openAi.dalle.isEnabled) {
       this.logger.warn('DALL·E 2 Image Bot is disabled in config')
     }
+    this.llmsBot = new LlmsBot(payments)
   }
 
   public isSupportedEvent (
@@ -83,7 +81,7 @@
     if (chatPrefix !== '') {
       return true
     }
-    return hasCommand || hasReply
+    return hasCommand || !!hasReply || this.llmsBot.isSupportedEvent(ctx)
   }
 
   public getEstimatedPrice (ctx: any): number {
@@ -125,6 +123,9 @@
         ) // cents
         return price * priceAdjustment
       }
+      if (this.llmsBot.isSupportedEvent(ctx)) {
+        return 0
+      }
       return 0
     } catch (e) {
       Sentry.captureException(e)
@@ -153,6 +154,12 @@
     }
 
     ctx.transient.analytics.sessionState = RequestState.Success
+
+    if (this.isSupportedImageReply(ctx)) {
+      await this.onAlterImage(ctx)
+      return
+    }
+
     if (
       ctx.hasCommand(SupportedCommands.chat.name) ||
       (ctx.message?.text?.startsWith('chat ') && ctx.chat?.type === 'private')
@@ -214,18 +221,11 @@
       return
     }
 
-    if (this.isSupportedImageReply(ctx)) {
-      await this.onAlterImage(ctx)
+    if (this.llmsBot.isSupportedEvent(ctx)) {
+      await this.llmsBot.onEvent(ctx)
       return
     }
 
-    if (
-      ctx.hasCommand(SupportedCommands.sum.name) ||
-      (ctx.message?.text?.startsWith('sum ') && ctx.chat?.type === 'private')
-    ) {
-      await this.onSum(ctx)
-      return
-    }
     if (ctx.hasCommand(SupportedCommands.last.name)) {
       await this.onLast(ctx)
       return
@@ -273,87 +273,6 @@
     )
   }
 
-  onGenImgCmd = async (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> => {
-    try {
-      if (ctx.session.openAi.imageGen.isEnabled) {
-        let prompt = (ctx.match ? ctx.match : ctx.message?.text) as string
-        if (!prompt || prompt.split(' ').length === 1) {
-          prompt = config.openAi.dalle.defaultPrompt
-        }
-        ctx.chatAction = 'upload_photo'
-        const numImages = ctx.session.openAi.imageGen.numImages
-        const imgSize = ctx.session.openAi.imageGen.imgSize
-        const imgs = await postGenerateImg(prompt, numImages, imgSize)
-        const msgExtras = getMessageExtras({ caption: `/dalle ${prompt}` })
-        await Promise.all(imgs.map(async (img: any) => {
-          await ctx.replyWithPhoto(img.url, msgExtras).catch(async (e) => {
-            await this.onError(ctx, e, MAX_TRIES)
-          })
-        }))
-        ctx.transient.analytics.sessionState = RequestState.Success
-        ctx.transient.analytics.actualResponseTime = now()
-      } else {
-        ctx.transient.analytics.sessionState = RequestState.Error
-        await sendMessage(ctx, 'Bot disabled').catch(async (e) => {
-          await this.onError(ctx, e, MAX_TRIES, 'Bot disabled')
-        })
-        ctx.transient.analytics.actualResponseTime = now()
-      }
-    } catch (e) {
-      await this.onError(
-        ctx,
-        e,
-        MAX_TRIES,
-        'There was an error while generating the image'
-      )
-    }
-  }
-
-  onAlterImage = async (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> => {
-    try {
-      if (ctx.session.openAi.imageGen.isEnabled) {
-        const photo =
-          ctx.message?.photo ?? ctx.message?.reply_to_message?.photo
-        const prompt = ctx.message?.caption ?? ctx.message?.text
-        const fileId = photo?.pop()?.file_id // with pop() get full image quality
-        if (!fileId) {
-          await ctx.reply('Cannot retrieve the image file. Please try again.')
-          ctx.transient.analytics.actualResponseTime = now()
-          return
-        }
-        const file = await ctx.api.getFile(fileId)
-        const filePath = `${config.openAi.dalle.telegramFileUrl}${config.telegramBotAuthToken}/${file.file_path}`
-        const imgSize = ctx.session.openAi.imageGen.imgSize
-        ctx.chatAction = 'upload_photo'
-        const imgs = await alterGeneratedImg(prompt ?? '', filePath, ctx, imgSize)
-        if (imgs) {
-          imgs.map(async (img: any) => {
-            if (img?.url) {
-              await ctx
-                .replyWithPhoto(img.url, { message_thread_id: ctx.message?.message_thread_id })
-                .catch(async (e) => {
-                  await this.onError(
-                    ctx,
-                    e,
-                    MAX_TRIES,
-                    'There was an error while generating the image'
-                  )
-                })
-            }
-          })
-        }
-        ctx.chatAction = null
-      }
-    } catch (e: any) {
-      await this.onError(
-        ctx,
-        e,
-        MAX_TRIES,
-        'An error occurred while generating the AI edit'
-      )
-    }
-  }
-
   private async promptGen (data: ChatPayload, msgId?: number): Promise< { price: number, chat: ChatConversation[] }> {
     const { conversation, ctx, model } = data
     try {
@@ -406,125 +325,6 @@
     }
   }
 
-  private async queryUrlCollection (ctx: OnMessageContext | OnCallBackQueryData,
-    url: string,
-    prompt: string,
-    conversation?: ChatConversation): Promise<void> {
-    try {
-      const collection = ctx.session.collections.activeCollections.find(c => c.url === url)
-      if (collection) {
-        const msgId = (
-          await ctx.reply('...', {
-            message_thread_id:
-              ctx.message?.message_thread_id ??
-              ctx.message?.reply_to_message?.message_thread_id
-          })
-        ).message_id
-        const response = await queryUrlDocument({
-          collectioName: collection.collectionName,
-          prompt,
-          conversation
-        })
-        if (
-          !(await this.payments.pay(ctx as OnMessageContext, response.price))
-        ) {
-          await this.onNotBalanceMessage(ctx)
-        } else {
-          console.log(ctx.chat?.id, msgId)
-          await ctx.api.editMessageText(ctx.chat?.id ?? '',
-            msgId, response.completion,
-            { parse_mode: 'Markdown' })
-            .catch(async (e) => { await this.onError(ctx, e) })
-        }
-      }
-    } catch (e: any) {
-      await this.onError(ctx, e)
-    }
-  }
-
-  async onCheckCollectionStatus (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
-    while (ctx.session.collections.collectionRequestQueue.length > 0) {
-      try {
-        const collection = ctx.session.collections.collectionRequestQueue.shift()
-        if (collection) {
-          const price = await llmCheckCollectionStatus(collection?.collectionName ?? '')
-          if (price > 0) {
-            if (
-              !(await this.payments.pay(ctx as OnMessageContext, price))
-            ) {
-              await this.onNotBalanceMessage(ctx)
-            } else {
-              ctx.session.collections.activeCollections.push(collection)
-              if (collection.msgId) {
-                const oneFee = await this.payments.getPriceInONE(price)
-                let statusMsg
-                if (collection.collectionType === 'URL') {
-                  statusMsg = `${collection.url} processed ${this.payments.toONE(oneFee, false).toFixed(2)} ONE fee)`
-                } else {
-                  statusMsg = `${collection.fileName} processed ${this.payments.toONE(oneFee, false).toFixed(2)} ONE fee)`
-                }
-                await ctx.api.editMessageText(ctx.chat?.id ?? '',
-                  collection.msgId, statusMsg,
-                  {
-                    parse_mode: 'Markdown',
-                    disable_web_page_preview: true
-                  })
-                  .catch(async (e) => { await this.onError(ctx, e) })
-              }
-              await this.queryUrlCollection(ctx, collection.url ?? '', collection.prompt ?? 'summary')
-            }
-          } else {
-            ctx.session.collections.collectionRequestQueue.push(collection)
-            if (ctx.session.collections.collectionRequestQueue.length === 1) {
-              await sleep(5000)
-            } else {
-              await sleep(2500)
-            }
-          }
-        }
-      } catch (e: any) {
-        await this.onError(ctx, e)
-      }
-    }
-  }
-
-  async onSum (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
-    if (this.botSuspended) {
-      ctx.transient.analytics.sessionState = RequestState.Error
-      await sendMessage(ctx, 'The bot is suspended').catch(async (e) => {
-        await this.onError(ctx, e)
-      })
-      ctx.transient.analytics.actualResponseTime = now()
-      return
-    }
-    try {
-      const { prompt } = getCommandNamePrompt(ctx, SupportedCommands)
-      const { url, newPrompt } = hasUrl(ctx, prompt)
-      if (url && ctx.chat?.id) {
-        const collection = ctx.session.collections.activeCollections.find(c => c.url === url)
-        if (!collection) {
-          await addUrlToCollection(ctx, ctx.chat?.id, url, newPrompt)
-          if (!ctx.session.collections.isProcessingQueue) {
-            ctx.session.collections.isProcessingQueue = true
-            await this.onCheckCollectionStatus(ctx).then(() => {
-              ctx.session.collections.isProcessingQueue = false
-            })
-          }
-        } else {
-          await this.queryUrlCollection(ctx, collection.collectionName, newPrompt)
-        }
-      } else {
-        ctx.transient.analytics.sessionState = RequestState.Error
-        await sendMessage(ctx, 'Error: Missing url').catch(async (e) => {
-          await this.onError(ctx, e)
-        })
-        ctx.transient.analytics.actualResponseTime = now()
-      }
-    } catch (e) {
-      await this.onError(ctx, e)
-    }
-  }
-
   async onMention (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
     try {
       if (this.botSuspended) {
@@ -668,13 +468,7 @@
         })
       }
       if (url && ctx.chat?.id) {
-        await addUrlToCollection(ctx, ctx.chat?.id, url, prompt)
-        if (!ctx.session.collections.isProcessingQueue) {
-          ctx.session.collections.isProcessingQueue = true
-          await this.onCheckCollectionStatus(ctx).then(() => {
-            ctx.session.collections.isProcessingQueue = false
-          })
-        }
+        await this.llmsBot.urlHandler(ctx, url, prompt)
       } else {
         chatConversation.push({
           role: 'user',
@@ -703,6 +497,87 @@ export class OpenAIBot implements PayableBot {
     }
   }
 
+  onGenImgCmd = async (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> => {
+    try {
+      if (ctx.session.openAi.imageGen.isEnabled) {
+        let prompt = (ctx.match ? ctx.match : ctx.message?.text) as string
+        if (!prompt || prompt.split(' ').length === 1) {
+          prompt = config.openAi.dalle.defaultPrompt
+        }
+        ctx.chatAction = 'upload_photo'
+        const numImages = ctx.session.openAi.imageGen.numImages
+        const imgSize = ctx.session.openAi.imageGen.imgSize
+        const imgs = await postGenerateImg(prompt, numImages, imgSize)
+        const msgExtras = getMessageExtras({ caption: `/dalle ${prompt}` })
+        await Promise.all(imgs.map(async (img: any) => {
+          await ctx.replyWithPhoto(img.url, msgExtras).catch(async (e) => {
+            await this.onError(ctx, e, MAX_TRIES)
+          })
+        }))
+        ctx.transient.analytics.sessionState = RequestState.Success
+        ctx.transient.analytics.actualResponseTime = now()
+      } else {
+        ctx.transient.analytics.sessionState = RequestState.Error
+        await sendMessage(ctx, 'Bot disabled').catch(async (e) => {
+          await this.onError(ctx, e, MAX_TRIES, 'Bot disabled')
+        })
+        ctx.transient.analytics.actualResponseTime = now()
+      }
+    } catch (e) {
+      await this.onError(
+        ctx,
+        e,
+        MAX_TRIES,
+        'There was an error while generating the image'
+      )
+    }
+  }
+
+  onAlterImage = async (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> => {
+    try {
+      if (ctx.session.openAi.imageGen.isEnabled) {
+        const photo =
+          ctx.message?.photo ?? ctx.message?.reply_to_message?.photo
+        const prompt = ctx.message?.caption ?? ctx.message?.text
+        const fileId = photo?.pop()?.file_id // with pop() get full image quality
+        if (!fileId) {
+          await ctx.reply('Cannot retrieve the image file. Please try again.')
+          ctx.transient.analytics.actualResponseTime = now()
+          return
+        }
+        const file = await ctx.api.getFile(fileId)
+        const filePath = `${config.openAi.dalle.telegramFileUrl}${config.telegramBotAuthToken}/${file.file_path}`
+        const imgSize = ctx.session.openAi.imageGen.imgSize
+        ctx.chatAction = 'upload_photo'
+        const imgs = await alterGeneratedImg(prompt ?? '', filePath, ctx, imgSize)
+        if (imgs) {
+          imgs.map(async (img: any) => {
+            if (img?.url) {
+              await ctx
+                .replyWithPhoto(img.url, { message_thread_id: ctx.message?.message_thread_id })
+                .catch(async (e) => {
+                  await this.onError(
+                    ctx,
+                    e,
+                    MAX_TRIES,
+                    'There was an error while generating the image'
+                  )
+                })
+            }
+          })
+        }
+        ctx.chatAction = null
+      }
+    } catch (e: any) {
+      await this.onError(
+        ctx,
+        e,
+        MAX_TRIES,
+        'An error occurred while generating the AI edit'
+      )
+    }
+  }
+
   async onLast (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
     if (ctx.session.openAi.chatGpt.chatConversation.length > 0) {
       const chat = ctx.session.openAi.chatGpt.chatConversation
@@ -798,6 +673,9 @@
         this.logger.error(
           `On method "${ex.method}" | ${ex.error_code} - ${ex.description}`
         )
+        await sendMessage(ctx, 'Error handling your request').catch(async (e) => {
+          await this.onError(ctx, e, retryCount - 1)
+        })
       }
     } else if (ex instanceof OpenAI.APIError) {
       // 429 RateLimitError
diff --git a/src/modules/types.ts b/src/modules/types.ts
index 1cf56bc..21b480d 100644
--- a/src/modules/types.ts
+++ b/src/modules/types.ts
@@ -86,8 +86,8 @@ export enum RequestState {
 export interface Collection {
   collectionName: string
   collectionType: 'URL' | 'PDF'
+  url: string
   fileName?: string
-  url?: string
   prompt?: string
   msgId?: number
 }
@@ -101,7 +101,8 @@
   activeCollections: Collection[]
   collectionRequestQueue: Collection[]
   isProcessingQueue: boolean
-  // docsJob: FileDoc[]
+  currentCollection: string
+  collectionConversation: ChatConversation[]
 }
 export interface Analytics {
   firstResponseTime: bigint