
Commit f367162

Merge pull request #334 from harmony-one/collection-handler

ctx command + delete collection logic

theofandrich authored Oct 6, 2023
2 parents 3c10c39 + c97b5cb
Showing 7 changed files with 133 additions and 44 deletions.
10 changes: 3 additions & 7 deletions src/bot.ts
@@ -554,18 +554,14 @@ bot.command('love', async (ctx) => {
  })
})

-bot.command('stop', (ctx) => {
+bot.command('stop', async (ctx) => {
  logger.info('/stop command')
-  ctx.session.openAi.chatGpt.chatConversation = []
-  ctx.session.openAi.chatGpt.usage = 0
-  ctx.session.openAi.chatGpt.price = 0
+  await openAiBot.onStop(ctx as OnMessageContext)
  ctx.session.translate.enable = false
  ctx.session.translate.languages = []
  ctx.session.oneCountry.lastDomain = ''
-  ctx.session.llms.chatConversation = []
-  ctx.session.llms.usage = 0
-  ctx.session.llms.price = 0
})

// bot.command("memo", (ctx) => {
//   ctx.reply(MEMO.text, {
//     parse_mode: "Markdown",
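The net effect of this hunk: /stop no longer resets OpenAI and LLM session state inline; it delegates to the bots themselves. A sketch of the resulting cleanup chain, assembled from the other files in this commit (openAiBot and llmsBot are assumed to be the instances wired up in src/bot.ts):

// /stop
//   └─ openAiBot.onStop(ctx)
//        ├─ this.onEnd(ctx)           // clears openAi.chatGpt conversation/usage/price
//        └─ this.llmsBot.onStop(ctx)  // deletes each active collection server-side,
//                                     // then clears collections.* and llms.* state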
4 changes: 2 additions & 2 deletions src/config.ts
@@ -33,7 +33,7 @@ export default {
    ? parseInt(process.env.SESSION_TIMEOUT)
    : 48, // in hours
  llms: {
-    apiEndpoint: process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000',
+    apiEndpoint: process.env.LLMS_ENDPOINT, // 'http://127.0.0.1:5000', // process.env.LLMS_ENDPOINT, //
    wordLimit: 50,
    model: 'chat-bison',
    minimumBalance: 0,
@@ -76,7 +76,7 @@ export default {
    isTypingEnabled: Boolean(
      parseInt(process.env.TYPING_STATUS_ENABLED ?? '1')
    ),
-    model: process.env.OPENAI_MODEL ?? 'gpt-4',
+    model: process.env.OPENAI_MODEL ?? 'gpt-3.5-turbo',
    prefixes: {
      chatPrefix: process.env.ASK_PREFIX
        ? process.env.ASK_PREFIX.split(',')
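Note the default-model change in the second hunk: with OPENAI_MODEL unset, the ?? fallback now resolves to 'gpt-3.5-turbo' instead of 'gpt-4', so deployments that want the old default must set the variable explicitly. A minimal sketch (the start command is an assumption, not from this repo):

// OPENAI_MODEL=gpt-4 npm start
const model = process.env.OPENAI_MODEL ?? 'gpt-3.5-turbo' // 'gpt-3.5-turbo' when unset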
24 changes: 0 additions & 24 deletions src/modules/1country/index.ts
@@ -16,7 +16,6 @@ import { MAX_TRIES, sendMessage } from '../open-ai/helpers'
import { sleep } from '../sd-images/utils'
import { isValidUrl } from '../open-ai/utils/web-crawler'
import { now } from '../../utils/perf'
-import OpenAI from 'openai'

export const SupportedCommands = {
  register: { name: 'rent' },
@@ -526,14 +525,6 @@ export class OneCountryBot implements PayableBot {
    return input.replace(/[^a-z0-9-]/g, '').toLowerCase()
  }

-  async onEnd (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
-    ctx.session.collections.activeCollections = []
-    ctx.session.collections.collectionConversation = []
-    ctx.session.collections.collectionRequestQueue = []
-    ctx.session.collections.currentCollection = ''
-    ctx.session.collections.isProcessingQueue = false
-  }
-
  async onError (
    ctx: OnMessageContext | OnCallBackQueryData,
    ex: any,
@@ -582,21 +573,6 @@ export class OneCountryBot implements PayableBot {
        `On method "${ex.method}" | ${ex.error_code} - ${ex.description}`
      )
    }
-  } else if (ex instanceof OpenAI.APIError) {
-    // 429 RateLimitError
-    // e.status = 400 || e.code = BadRequestError
-    this.logger.error(`OPENAI Error ${ex.status}(${ex.code}) - ${ex.message}`)
-    if (ex.code === 'context_length_exceeded') {
-      await sendMessage(ctx, ex.message).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
-      ctx.transient.analytics.actualResponseTime = now()
-      await this.onEnd(ctx)
-    } else {
-      await sendMessage(
-        ctx,
-        'Error accessing OpenAI (ChatGPT). Please try later'
-      ).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
-      ctx.transient.analytics.actualResponseTime = now()
-    }
  } else {
    this.logger.error(`${ex.toString()}`)
    await sendMessage(ctx, 'Error handling your request')
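The OpenAI.APIError branch and the onEnd helper removed from OneCountryBot are not dropped from the codebase: equivalent handling reappears in LlmsBot below, whose onError gains the same APIError branch with the collection cleanup folded into the new onStop.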
20 changes: 16 additions & 4 deletions src/modules/llms/api/llmApi.ts
@@ -1,8 +1,17 @@
import axios from 'axios'
import config from '../../../config'
import { type ChatConversation } from '../../types'
+import pino from 'pino'

-const API_ENDPOINT = config.llms.apiEndpoint // 'http://localhost:8080' // http://127.0.0.1:5000' // config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://127.0.0.1:5000'
+const API_ENDPOINT = config.llms.apiEndpoint // 'http://localhost:8080' // http://127.0.0.1:5000' // config.llms.apiEndpoint
+
+const logger = pino({
+  name: 'llmApi',
+  transport: {
+    target: 'pino-pretty',
+    options: { colorize: true }
+  }
+})

export interface LlmCompletion {
  completion: ChatConversation | undefined
@@ -35,7 +44,6 @@ export const llmAddUrlDocument = async (args: LlmAddUrlDocument): Promise<string

export const llmCheckCollectionStatus = async (name: string): Promise<number> => {
  const endpointUrl = `${API_ENDPOINT}/collections/document/${name}` // ?collectionName=${collectionName}`
-  console.log(endpointUrl)
  const response = await axios.get(endpointUrl)
  if (response) {
    return response.data.price
@@ -50,7 +58,6 @@ interface QueryUrlDocumentOutput {

export const queryUrlDocument = async (args: QueryUrlDocument): Promise<QueryUrlDocumentOutput> => {
  const data = { collectionName: args.collectioName, prompt: args.prompt, conversation: args.conversation }
-  console.log(data.conversation)
  const endpointUrl = `${API_ENDPOINT}/collections/query`
  const response = await axios.post(endpointUrl, data)
  if (response) {
@@ -62,11 +69,16 @@ export const queryUrlDocument = async (args: QueryUrlDocument): Promise<QueryUrl
  }
}

+export const deleteCollection = async (collectionName: string): Promise<void> => {
+  const endpointUrl = `${API_ENDPOINT}/collections/document/${collectionName}`
+  await axios.delete(endpointUrl)
+  logger.info(`Collection ${collectionName} deleted`)
+}
+
export const llmCompletion = async (
  conversation: ChatConversation[],
  model = config.llms.model
): Promise<LlmCompletion> => {
-  // eslint-disable-next-line no-useless-catch
  const data = {
    model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
    stream: false,
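deleteCollection propagates any HTTP failure to its caller, including a 404 for a collection that has already been removed server-side. A hedged sketch of a caller-side guard (safeDeleteCollection is a hypothetical helper, not part of this commit):

import { AxiosError } from 'axios'
import { deleteCollection } from './llmApi'

// Treat "already deleted" (404) as success; rethrow anything else
// so the bot's onError path can report it.
async function safeDeleteCollection (collectionName: string): Promise<void> {
  try {
    await deleteCollection(collectionName)
  } catch (e) {
    if (e instanceof AxiosError && e.response?.status === 404) return
    throw e
  }
}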
12 changes: 9 additions & 3 deletions src/modules/llms/helpers.ts
@@ -15,9 +15,9 @@ import { llmAddUrlDocument } from './api/llmApi'
export const SupportedCommands = {
  bardF: { name: 'bard' },
  bard: { name: 'b' },
-  pdf: { name: 'pdf' },
  j2Ultra: { name: 'j2-ultra' },
-  sum: { name: 'sum' }
+  sum: { name: 'sum' },
+  ctx: { name: 'ctx' }
}

export const MAX_TRIES = 3
@@ -265,11 +265,17 @@ export async function addDocToCollection (ctx: OnMessageContext | OnCallBackQuer
    url,
    fileName
  })
+  const msgId = (await ctx.reply('...', {
+    message_thread_id:
+      ctx.message?.message_thread_id ??
+      ctx.message?.reply_to_message?.message_thread_id
+  })).message_id
  ctx.session.collections.collectionRequestQueue.push({
    collectionName,
    collectionType: 'PDF',
    fileName,
    url: url.toLocaleLowerCase(),
-    prompt
+    prompt,
+    msgId
  })
}
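Carrying msgId in the queue entry enables the placeholder pattern this commit uses throughout: reply immediately with '...', then edit that message in place once the real answer is ready. A minimal sketch of the consumer side (grammY-style editMessageText, mirroring onCurrentCollection in the next file):

const placeholder = await ctx.reply('...')
const response = await queryUrlDocument({ collectioName, prompt, conversation })
await ctx.api.editMessageText(ctx.chat?.id ?? '', placeholder.message_id,
  response.completion,
  { parse_mode: 'Markdown', disable_web_page_preview: true })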
102 changes: 98 additions & 4 deletions src/modules/llms/index.ts
@@ -30,11 +30,12 @@ import {
} from './helpers'
import { getUrlFromText, preparePrompt, sendMessage } from '../open-ai/helpers'
import { vertexCompletion } from './api/vertex'
-import { type LlmCompletion, llmCompletion, llmCheckCollectionStatus, queryUrlDocument } from './api/llmApi'
+import { type LlmCompletion, llmCompletion, llmCheckCollectionStatus, queryUrlDocument, deleteCollection } from './api/llmApi'
import { LlmsModelsEnum } from './types'
import * as Sentry from '@sentry/node'
import { now } from '../../utils/perf'
import { AxiosError } from 'axios'
+import OpenAI from 'openai'
export class LlmsBot implements PayableBot {
  public readonly module = 'LlmsBot'
  private readonly logger: Logger
@@ -130,7 +131,12 @@ export class LlmsBot implements PayableBot {
  }

  if (ctx.hasCommand(SupportedCommands.j2Ultra.name)) {
-    await this.onChat(ctx, LlmsModelsEnum.J2_ULTRA) // .J2_ULTRA);
+    await this.onChat(ctx, LlmsModelsEnum.J2_ULTRA)
    return
  }

+  if (ctx.hasCommand(SupportedCommands.ctx.name)) {
+    await this.onCurrentCollection(ctx)
+    return
+  }
+
@@ -235,6 +241,64 @@ export class LlmsBot implements PayableBot {
    return []
  }

+  private async onCurrentCollection (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    try {
+      let prompt = ''
+      prompt = ctx.match as string
+      // add prefix logic here if prompt == ''
+      const collectionName = ctx.session.collections.currentCollection
+      const collection = ctx.session.collections.activeCollections.find(c => c.collectionName === collectionName)
+      if (collection && collectionName) {
+        const conversation = ctx.session.collections.collectionConversation
+        const msgId = (
+          await ctx.reply('...', {
+            message_thread_id:
+              ctx.message?.message_thread_id ??
+              ctx.message?.reply_to_message?.message_thread_id
+          })
+        ).message_id
+        const response = await queryUrlDocument({
+          collectioName: collection.collectionName,
+          prompt,
+          conversation
+        })
+        if (
+          !(await this.payments.pay(ctx as OnMessageContext, response.price))
+        ) {
+          await this.onNotBalanceMessage(ctx)
+        } else {
+          conversation.push({
+            content: `${prompt} ${collection.url}`,
+            role: 'user'
+          }, {
+            content: response.completion,
+            role: 'system'
+          })
+          await ctx.api.editMessageText(ctx.chat?.id ?? '',
+            msgId, response.completion,
+            { parse_mode: 'Markdown', disable_web_page_preview: true })
+            .catch(async (e) => { await this.onError(ctx, e) })
+          ctx.session.collections.collectionConversation = [...conversation]
+        }
+      } else {
+        await sendMessage(ctx, 'There is no active collection (url/pdf file)')
+      }
+    } catch (e: any) {
+      Sentry.captureException(e)
+      ctx.transient.analytics.sessionState = RequestState.Error
+      if (e instanceof AxiosError) {
+        if (e.message.includes('404')) {
+          ctx.session.collections.activeCollections =
+            [...ctx.session.collections.activeCollections.filter(c => c.collectionName !==
+              ctx.session.collections.currentCollection)]
+          await sendMessage(ctx, 'Collection not found, please try again')
+          return
+        }
+      }
+      await this.onError(ctx, e)
+    }
+  }
+
  private async queryUrlCollection (ctx: OnMessageContext | OnCallBackQueryData,
    url: string,
    prompt: string): Promise<void> {
@@ -537,7 +601,16 @@ export class LlmsBot implements PayableBot {
    }
  }

-  async onEnd (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+  async onStop (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    for (const c of ctx.session.collections.activeCollections) {
+      this.logger.info(`Deleting collection ${c.collectionName}`)
+      await deleteCollection(c.collectionName)
+    }
+    ctx.session.collections.activeCollections = []
+    ctx.session.collections.collectionConversation = []
+    ctx.session.collections.collectionRequestQueue = []
+    ctx.session.collections.currentCollection = ''
+    ctx.session.collections.isProcessingQueue = false
    ctx.session.llms.chatConversation = []
    ctx.session.llms.usage = 0
    ctx.session.llms.price = 0
@@ -608,9 +681,30 @@ export class LlmsBot implements PayableBot {
        ctx.transient.analytics.actualResponseTime = now()
        await sendMessage(ctx, 'Error handling your request').catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
      }
+    } else if (e instanceof OpenAI.APIError) {
+      // 429 RateLimitError
+      // e.status = 400 || e.code = BadRequestError
+      this.logger.error(`OPENAI Error ${e.status}(${e.code}) - ${e.message}`)
+      if (e.code === 'context_length_exceeded') {
+        await sendMessage(ctx, e.message).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
+        ctx.transient.analytics.actualResponseTime = now()
+        await this.onStop(ctx)
+      } else {
+        await sendMessage(
+          ctx,
+          'Error accessing OpenAI (ChatGPT). Please try later'
+        ).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
+        ctx.transient.analytics.actualResponseTime = now()
+      }
+    } else if (e instanceof AxiosError) {
+      await sendMessage(ctx, 'Error handling your request').catch(async (e) => {
+        await this.onError(ctx, e, retryCount - 1)
+      })
    } else {
      this.logger.error(`${e.toString()}`)
-      await sendMessage(ctx, 'Error handling your request').catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
+      await sendMessage(ctx, 'Error handling your request')
+        .catch(async (e) => { await this.onError(ctx, e, retryCount - 1) }
+        )
+      ctx.transient.analytics.actualResponseTime = now()
    }
  }
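Read together, the hunks above implement the commit's two features end to end. A sketch of the /ctx request flow as it reads from this file (step numbering ours):

// 1. /ctx <question>           -> onCurrentCollection(ctx)
// 2. ctx.reply('...')          -> msgId, an immediate placeholder
// 3. queryUrlDocument(...)     -> queries the current collection with the session conversation
// 4. payments.pay(ctx, price)  -> onNotBalanceMessage when the balance is short
// 5. editMessageText(msgId)    -> placeholder replaced by the completion
// 6. session conversation updated; an Axios 404 prunes the stale collection
//    from activeCollections and asks the user to retry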
5 changes: 5 additions & 0 deletions src/modules/open-ai/index.ts
@@ -602,6 +602,11 @@ export class OpenAIBot implements PayableBot {
    ctx.session.openAi.chatGpt.price = 0
  }

+  async onStop (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
+    await this.onEnd(ctx)
+    await this.llmsBot.onStop(ctx)
+  }
+
  async onNotBalanceMessage (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
    const accountId = this.payments.getAccountId(ctx)
    const account = this.payments.getUserAccount(accountId)
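OpenAIBot.onStop thus becomes the single entry point for /stop cleanup: it resets its own chatGpt state via onEnd, then fans out to LlmsBot.onStop, which deletes the server-side collections before clearing the collections and llms session state. One behavioral note (our reading of the diff, not stated in it): because deleteCollection is awaited inside the loop without a catch, a failing DELETE aborts the remaining cleanup and surfaces in the /stop handler.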
