Skip to content

Commit

Permalink
fix conflict
Browse files Browse the repository at this point in the history
  • Loading branch information
Sun Hyuk Ahn committed Oct 4, 2023
2 parents b847f08 + 5287afb commit 06b0c3f
Show file tree
Hide file tree
Showing 3 changed files with 283 additions and 235 deletions.
115 changes: 100 additions & 15 deletions src/modules/document-handler/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,29 @@ import { type OnMessageContext, type PayableBot, type RefundCallback, RequestSta
import * as Sentry from '@sentry/node'
import { now } from '../../utils/perf'
import { llmAddUrlDocument } from '../llms/api/llmApi'
import { type Logger, pino } from 'pino'
import { GrammyError } from 'grammy'
import { sendMessage } from '../open-ai/helpers'
import { sleep } from '../sd-images/utils'
import { AxiosError } from 'axios'

const SupportedDocuments = { PDF: 'application/pdf' }

const MAX_TRIES = 3
export class DocumentHandler implements PayableBot {
public readonly module = 'DocumentHandler'
private readonly logger: Logger

/**
 * Sets up the module-scoped structured logger (pretty-printed, colorized).
 * Fix: the logger was named 'OpenAIBot' — a copy-paste from the OpenAI
 * module — which mislabeled every log line from this handler. It now
 * matches this class's `module` identifier, 'DocumentHandler'.
 */
constructor () {
  this.logger = pino({
    name: 'DocumentHandler',
    transport: {
      target: 'pino-pretty',
      options: { colorize: true }
    }
  })
}

/**
 * Price estimate for handling a document message.
 * Pricing is flat: every document costs the same regardless of content.
 *
 * @param ctx incoming message context (unused — pricing does not depend on it)
 * @returns the constant per-document price
 */
public getEstimatedPrice (ctx: OnMessageContext): number {
  const flatDocumentPrice = 1
  return flatDocumentPrice
}
Expand All @@ -18,9 +37,9 @@ export class DocumentHandler implements PayableBot {
if (documentType === 'application/pdf' && ctx.chat.id) {
const url = file.getUrl()
const fileName = ctx.message.document?.file_name ?? file.file_id
await this.addDocToCollection(ctx, ctx.chat.id, fileName, url)
const prompt = ctx.message.caption ?? ''
await this.addDocToCollection(ctx, ctx.chat.id, fileName, url, prompt)
}
console.log(file)
ctx.transient.analytics.sessionState = RequestState.Success
} catch (ex) {
Sentry.captureException(ex)
Expand All @@ -39,18 +58,84 @@ export class DocumentHandler implements PayableBot {
return false
}

private async addDocToCollection (ctx: OnMessageContext, chatId: number, fileName: string, url: string): Promise<void> {
const collectionName = await llmAddUrlDocument({
chatId,
url,
fileName
})
ctx.session.collections.collectionRequestQueue.push({
collectionName,
collectionType: 'PDF',
fileName,
url
})
ctx.session.collections.isProcessingQueue = true
/**
 * Registers a remote PDF with the LLM backend and queues it in the user's
 * session for collection processing.
 *
 * @param ctx      incoming message context; its session collection queue is mutated
 * @param chatId   Telegram chat the document belongs to
 * @param fileName display name of the uploaded file
 * @param url      downloadable URL of the document
 * @param prompt   optional caption supplied with the upload, carried along
 *                 so it can be run against the collection once ready
 *
 * Errors from the backend call are routed to {@link onError}; the method
 * itself never throws. Fix: the catch variable was annotated `: any`,
 * which defeats `useUnknownInCatchVariables` — the annotation is dropped
 * so the compiler keeps it `unknown` (onError accepts it either way).
 */
private async addDocToCollection (ctx: OnMessageContext, chatId: number, fileName: string, url: string, prompt: string): Promise<void> {
  try {
    // Backend returns the name of the collection the document was added to.
    const collectionName = await llmAddUrlDocument({
      chatId,
      url,
      fileName
    })
    ctx.session.collections.collectionRequestQueue.push({
      collectionName,
      collectionType: 'PDF',
      fileName,
      url,
      prompt
    })
  } catch (e) {
    await this.onError(ctx, e)
  }
}

/**
 * Central error handler with bounded retries (MAX_TRIES by default).
 * Marks the analytics session as errored, reports to Sentry, then branches
 * on the error type: Telegram API errors (GrammyError) get user-facing
 * messages and rate-limit backoff, Axios errors and everything else get a
 * generic failure message. Failed sends recurse with retryCount - 1.
 *
 * NOTE(review): several details look copy-pasted from the OpenAI module and
 * should be confirmed: the Sentry context key is 'open-ai', the 400 message
 * mentions "photos" in a document handler, and the 429 branch pops the
 * openAi.chatGpt conversation.
 */
async onError (
ctx: OnMessageContext,
ex: any,
retryCount: number = MAX_TRIES,
msg?: string
): Promise<void> {
ctx.transient.analytics.sessionState = RequestState.Error
// NOTE(review): context key 'open-ai' — presumably inherited from the
// OpenAI bot's handler; confirm whether it should be 'document-handler'.
Sentry.setContext('open-ai', { retryCount, msg })
Sentry.captureException(ex)
if (retryCount === 0) {
// Retry limit reached, log an error or take alternative action
this.logger.error(`Retry limit reached for error: ${ex}`)
return
}
if (ex instanceof GrammyError) {
// 400 + "not enough rights": the bot lacks permission in this chat.
if (ex.error_code === 400 && ex.description.includes('not enough rights')) {
// NOTE(review): message says "photos" but this is the document handler —
// likely a copy-paste; confirm intended wording.
await sendMessage(
ctx,
'Error: The bot does not have permission to send photos in chat'
)
ctx.transient.analytics.actualResponseTime = now()
} else if (ex.error_code === 429) {
// Rate limited. Backoff: wait at least 60s; if Telegram asks for 60s
// or more, double its suggested retry_after.
const retryAfter = ex.parameters.retry_after
? ex.parameters.retry_after < 60
? 60
: ex.parameters.retry_after * 2
: 60
const method = ex.method
const errorMessage = `On method "${method}" | ${ex.error_code} - ${ex.description}`
this.logger.error(errorMessage)
await sendMessage(
ctx,
`${
ctx.from.username ? ctx.from.username : ''
} Bot has reached limit, wait ${retryAfter} seconds`
).catch(async (e) => { await this.onError(ctx, e, retryCount - 1) })
ctx.transient.analytics.actualResponseTime = now()
// NOTE(review): mutates the OpenAI chat session from the document
// handler — confirm this cross-module coupling is intended.
if (method === 'editMessageText') {
ctx.session.openAi.chatGpt.chatConversation.pop() // deletes last prompt
}
await sleep(retryAfter * 1000) // wait retryAfter seconds to enable bot
} else {
// Any other Telegram API error: log details, tell the user generically.
this.logger.error(
`On method "${ex.method}" | ${ex.error_code} - ${ex.description}`
)
await sendMessage(ctx, 'Error handling your request').catch(async (e) => {
await this.onError(ctx, e, retryCount - 1)
})
}
} else if (ex instanceof AxiosError) {
// HTTP error talking to the LLM backend; user gets a generic message.
await sendMessage(ctx, 'Error handling your request').catch(async (e) => {
await this.onError(ctx, e, retryCount - 1)
})
} else {
// Unknown error type: log its string form and notify the user.
this.logger.error(`${ex.toString()}`)
await sendMessage(ctx, 'Error handling your request')
.catch(async (e) => { await this.onError(ctx, e, retryCount - 1) }
)
ctx.transient.analytics.actualResponseTime = now()
}
}
}
115 changes: 26 additions & 89 deletions src/modules/llms/api/llmApi.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import axios, { AxiosError } from 'axios'
import axios from 'axios'
import config from '../../../config'
import { type ChatConversation } from '../../types'

const API_ENDPOINT = 'http://127.0.0.1:5000' // config.llms.apiEndpoint // 'http://localhost:8080' // http://127.0.0.1:5000' // config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://127.0.0.1:5000'
const API_ENDPOINT = config.llms.apiEndpoint // 'http://localhost:8080' // http://127.0.0.1:5000' // config.llms.apiEndpoint // config.llms.apiEndpoint // 'http://127.0.0.1:5000'

export interface LlmCompletion {
completion: ChatConversation | undefined
Expand All @@ -24,42 +24,23 @@ interface QueryUrlDocument {
}

export const llmAddUrlDocument = async (args: LlmAddUrlDocument): Promise<string> => {
try {
const data = { ...args }
const endpointUrl = `${API_ENDPOINT}/collections/document`
const response = await axios.post(endpointUrl, data)
if (response) {
return response.data.collectionName
}
return ''
} catch (error: any) {
if (error instanceof AxiosError) {
console.log(error.code)
console.log(error.message)
console.log(error.stack)
}
throw error
const data = { ...args }
const endpointUrl = `${API_ENDPOINT}/collections/document`
const response = await axios.post(endpointUrl, data)
if (response) {
return response.data.collectionName
}
return ''
}

export const llmCheckCollectionStatus = async (name: string): Promise<number> => {
try {
// const data = { collectionName: name }
const endpointUrl = `${API_ENDPOINT}/collections/document/${name}` // ?collectionName=${collectionName}`
console.log(endpointUrl)
const response = await axios.get(endpointUrl) // , { params: data }
if (response) {
return response.data.price
}
return -1
} catch (error: any) {
if (error instanceof AxiosError) {
console.log(error.code)
console.log(error.message)
console.log(error.stack)
}
throw error
const endpointUrl = `${API_ENDPOINT}/collections/document/${name}` // ?collectionName=${collectionName}`
console.log(endpointUrl)
const response = await axios.get(endpointUrl)
if (response) {
return response.data.price
}
return -1
}

interface QueryUrlDocumentOutput {
Expand All @@ -86,66 +67,22 @@ export const llmCompletion = async (
model = config.llms.model
): Promise<LlmCompletion> => {
// eslint-disable-next-line no-useless-catch
try {
const data = {
model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
stream: false,
messages: conversation
}
const url = `${API_ENDPOINT}/llms/completions`
const response = await axios.post(url, data)

if (response) {
const totalInputTokens = response.data.usage.prompt_tokens
const totalOutputTokens = response.data.usage.completion_tokens
const completion = response.data.choices

return {
completion: {
content: completion[0].message?.content,
role: 'system',
model
},
usage: totalOutputTokens + totalInputTokens,
price: 0
}
}
return {
completion: undefined,
usage: 0,
price: 0
}
} catch (error: any) {
throw error
}
}

export const llmWebCrawler = async (
prompt: string,
model: string,
chadId: number,
msgId: number,
url: string
): Promise<LlmCompletion> => {
if (!url.startsWith('https://')) {
url = `https://${url}`
}
const data = {
prompt,
chatId: '' + chadId,
msgId: '' + msgId,
token: '' + config.telegramBotAuthToken,
url
model, // chat-bison@001 'chat-bison', //'gpt-3.5-turbo',
stream: false,
messages: conversation
}
const urlApi = `${API_ENDPOINT}/llama-index/text`
const response = await axios.post(urlApi, data)
if (response.data) {
const totalInputTokens = 0 // response.data.usage.prompt_tokens
const totalOutputTokens = 0 // response.data.usage.completion_tokens
const completion = response.data
const url = `${API_ENDPOINT}/llms/completions`
const response = await axios.post(url, data)

if (response) {
const totalInputTokens = response.data.usage.prompt_tokens
const totalOutputTokens = response.data.usage.completion_tokens
const completion = response.data.choices

return {
completion: {
content: completion ?? '',
content: completion[0].message?.content,
role: 'system',
model
},
Expand Down
Loading

0 comments on commit 06b0c3f

Please sign in to comment.