Skip to content

Commit

Permalink
Add minimum balance check to OpenAI bot
Browse files Browse the repository at this point in the history
  • Loading branch information
fegloff committed Apr 4, 2024
1 parent c87e464 commit 28304b7
Show file tree
Hide file tree
Showing 5 changed files with 81 additions and 47 deletions.
1 change: 1 addition & 0 deletions src/modules/llms/api/athropic.ts
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ export const anthropicStreamCompletion = async (
msgId: number,
limitTokens = true
): Promise<LlmCompletion> => {
logger.info(`Handling ${model} stream completion`)
const data = {
model,
stream: true, // Set stream to true to receive the completion as a stream
Expand Down
35 changes: 28 additions & 7 deletions src/modules/open-ai/api/openAi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ import {
ChatGPTModels,
type DalleGPTModel,
DalleGPTModels,
ChatGPTModelsEnum
ChatGPTModelsEnum,
type ChatGptCompletion
} from '../types'
import type fs from 'fs'
import { type ChatCompletionMessageParam } from 'openai/resources/chat/completions'
Expand Down Expand Up @@ -114,7 +115,7 @@ export const streamChatCompletion = async (
model = config.openAi.chatGpt.model,
msgId: number,
limitTokens = true
): Promise<string> => {
): Promise<ChatGptCompletion> => {
let completion = ''
let wordCountMinimum = 2
const stream = await openai.chat.completions.create({
Expand Down Expand Up @@ -161,7 +162,8 @@ export const streamChatCompletion = async (
}
}
completion = completion.replaceAll('...', '')

const inputTokens = getTokenNumber(conversation[conversation.length - 1].content as string) + ctx.session.openAi.chatGpt.usage
const outputTokens = getTokenNumber(completion)
await ctx.api
.editMessageText(ctx.chat?.id, msgId, completion)
.catch((e: any) => {
Expand All @@ -175,7 +177,16 @@ export const streamChatCompletion = async (
throw e
}
})
return completion
return {
completion: {
content: completion,
role: 'assistant'
},
usage: outputTokens + inputTokens,
price: 0,
inputTokens,
outputTokens
}
}

export const streamChatVisionCompletion = async (
Expand All @@ -185,7 +196,7 @@ export const streamChatVisionCompletion = async (
imgUrls: string[],
msgId: number,
limitTokens = true
): Promise<string> => {
): Promise<ChatGptCompletion> => {
let completion = ''
let wordCountMinimum = 2
const payload: any = {
Expand Down Expand Up @@ -244,7 +255,8 @@ export const streamChatVisionCompletion = async (
}
}
completion = completion.replaceAll('...', '')

const inputTokens = getTokenNumber(prompt) + ctx.session.openAi.chatGpt.usage
const outputTokens = getTokenNumber(completion)
await ctx.api
.editMessageText(ctx.chat?.id, msgId, completion)
.catch((e: any) => {
Expand All @@ -258,7 +270,16 @@ export const streamChatVisionCompletion = async (
throw e
}
})
return completion
return {
completion: {
content: completion,
role: 'assistant'
},
usage: outputTokens + inputTokens,
price: 0,
inputTokens,
outputTokens
}
}

export async function improvePrompt (promptText: string, model: string): Promise<string> {
Expand Down
59 changes: 26 additions & 33 deletions src/modules/open-ai/helpers.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import config from '../../config'
import { type OnMessageContext, type OnCallBackQueryData, type MessageExtras, type ChatPayload } from '../types'
import { type ParseMode } from 'grammy/types'
import { getChatModel, getChatModelPrice, getTokenNumber } from './api/openAi'
import { getChatModel, getChatModelPrice } from './api/openAi'
import { type Message, type InlineKeyboardMarkup } from 'grammy/out/types'
import { isValidUrl } from './utils/web-crawler'
import { type ChatGptCompletion } from './types'

export enum SupportedCommands {
chat = 'chat',
Expand Down Expand Up @@ -240,24 +241,20 @@ export const hasPrefix = (prompt: string): string => {
)
}

export const getPromptPrice = (completion: string, data: ChatPayload): { price: number, promptTokens: number, completionTokens: number, totalTokens: number } => {
const { conversation, ctx, model } = data
const currentUsage = data.prompt ? 0 : ctx.session.openAi.chatGpt.usage
const prompt = data.prompt ? data.prompt : conversation[conversation.length - 1].content
const promptTokens = getTokenNumber(prompt as string) + currentUsage
const completionTokens = getTokenNumber(completion)
export const getPromptPrice = (completion: ChatGptCompletion, data: ChatPayload, updateSession = true): { price: number, promptTokens: number, completionTokens: number } => {
const { ctx, model } = data
const modelPrice = getChatModel(model)
const price =
getChatModelPrice(modelPrice, true, promptTokens, completionTokens) *
getChatModelPrice(modelPrice, true, completion.inputTokens ?? 0, completion.outputTokens ?? 0) *
config.openAi.chatGpt.priceAdjustment
conversation.push({ content: completion, role: 'system' })
ctx.session.openAi.chatGpt.usage += completionTokens
ctx.session.openAi.chatGpt.price += price
if (updateSession) {
ctx.session.openAi.chatGpt.usage += completion.outputTokens ?? 0
ctx.session.openAi.chatGpt.price += price
}
return {
price,
promptTokens,
completionTokens,
totalTokens: data.prompt ? promptTokens + completionTokens : ctx.session.openAi.chatGpt.usage
promptTokens: completion.inputTokens ?? 0,
completionTokens: completion.outputTokens ?? 0
}
}

Expand Down Expand Up @@ -285,22 +282,18 @@ export const getUrlFromText = (ctx: OnMessageContext | OnCallBackQueryData): str
return undefined
}

// export async function addUrlToCollection (ctx: OnMessageContext | OnCallBackQueryData, chatId: number, url: string, prompt: string): Promise<void> {
// const collectionName = await llmAddUrlDocument({
// chatId,
// url
// })
// const msgId = (await ctx.reply('...', {
// message_thread_id:
// ctx.message?.message_thread_id ??
// ctx.message?.reply_to_message?.message_thread_id
// })).message_id

// ctx.session.collections.collectionRequestQueue.push({
// collectionName,
// collectionType: 'URL',
// url,
// prompt,
// msgId
// })
// }
/**
 * Estimates the minimum balance (price) a user needs before a request is
 * accepted, by pricing a representative request of 400 input and 800 output
 * tokens for the given model. Runs getPromptPrice in dry-run mode
 * (updateSession = false) so the user's session usage/price are not mutated.
 *
 * @param ctx - message or callback-query context (passed through unchanged)
 * @param model - model id to price the synthetic request against
 * @returns the estimated price of the synthetic request
 */
export const getMinBalance = async (ctx: OnMessageContext | OnCallBackQueryData,
  model: string): Promise<number> => {
  const minBalance = getPromptPrice({
    inputTokens: 400,
    outputTokens: 800,
    completion: undefined, // no real completion — cost estimate only
    usage: 0,
    price: 0
  }, {
    ctx,
    model, // required string parameter; no '' fallback needed
    conversation: []
  }, false)
  return minBalance.price
}
24 changes: 17 additions & 7 deletions src/modules/open-ai/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,8 @@ import {
MAX_TRIES,
preparePrompt,
sendMessage,
SupportedCommands
SupportedCommands,
getMinBalance
} from './helpers'
import * as Sentry from '@sentry/node'
import { now } from '../../utils/perf'
Expand Down Expand Up @@ -407,14 +408,17 @@ export class OpenAIBot implements PayableBot {

private async hasBalance (ctx: OnMessageContext | OnCallBackQueryData,
minBalance = +config.openAi.chatGpt.minimumBalance): Promise<boolean> {
const minBalanceOne = this.payments.toONE(await this.payments.getPriceInONE(minBalance), false)
const accountId = this.payments.getAccountId(ctx)
const addressBalance = await this.payments.getUserBalance(accountId)
const { totalCreditsAmount } = await chatService.getUserCredits(accountId)
const balance = addressBalance.plus(totalCreditsAmount)
const balanceOne = this.payments.toONE(balance, false)
const isGroupInWhiteList = await this.payments.isGroupInWhitelist(ctx as OnMessageContext)
return (
(+balanceOne > minBalance) ||
(this.payments.isUserInWhitelist(ctx.from.id, ctx.from.username))
+balanceOne > +minBalanceOne ||
(this.payments.isUserInWhitelist(ctx.from.id, ctx.from.username)) ||
isGroupInWhiteList
)
}

Expand Down Expand Up @@ -517,8 +521,12 @@ export class OpenAIBot implements PayableBot {
ctx.transient.analytics.actualResponseTime = now()
const price = getPromptPrice(completion, data)
this.logger.info(
`streamChatCompletion result = tokens: ${price.totalTokens} | ${model} | price: ${price.price}¢` // price.promptTokens + price.completionTokens }
`streamChatCompletion result = tokens: ${price.promptTokens + price.completionTokens} | ${model} | price: ${price.price}¢` // price.promptTokens + price.completionTokens }
)
conversation.push({
role: 'assistant',
content: completion.completion?.content ?? ''
})
return {
price: price.price,
chat: conversation
Expand All @@ -527,7 +535,7 @@ export class OpenAIBot implements PayableBot {
} else {
const response = await chatCompletion(conversation, ChatGPTModelsEnum.GPT_35_TURBO_16K)
conversation.push({
role: 'system',
role: 'assistant',
content: response.completion
})
await responseWithVoice(response.completion, ctx as OnMessageContext, msgId)
Expand Down Expand Up @@ -669,7 +677,8 @@ export class OpenAIBot implements PayableBot {
try {
const prompt = ctx.session.openAi.chatGpt.requestQueue.shift() ?? ''
const { chatConversation, model } = ctx.session.openAi.chatGpt
if (await this.hasBalance(ctx)) {
const minBalance = await getMinBalance(ctx, ChatGPTModelsEnum.GPT_4_32K)
if (await this.hasBalance(ctx, minBalance)) {
if (prompt === '') {
const msg =
chatConversation.length > 0
Expand Down Expand Up @@ -724,7 +733,8 @@ export class OpenAIBot implements PayableBot {
while (ctx.session.openAi.imageGen.imgRequestQueue.length > 0) {
try {
const img = ctx.session.openAi.imageGen.imgRequestQueue.shift()
if (await this.hasBalance(ctx)) {
const minBalance = await getMinBalance(ctx, ChatGPTModelsEnum.GPT_4_32K)
if (await this.hasBalance(ctx, minBalance)) {
if (img?.command === 'dalle') {
await this.onGenImgCmd(img?.prompt, ctx)
} else if (img?.command === 'alter') {
Expand Down
9 changes: 9 additions & 0 deletions src/modules/open-ai/types.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import { type ChatConversation } from '../types'

export interface ChatModel {
name: string
inputPrice: number
Expand All @@ -18,6 +20,13 @@ export enum ChatGPTModelsEnum {
GPT_35_TURBO_16K = 'gpt-3.5-turbo-16k',
GPT_4_VISION_PREVIEW = 'gpt-4-vision-preview'
}
export interface ChatGptCompletion {
  // The assistant's reply as a conversation message; undefined when the
  // struct only carries token counts (e.g. dry-run price estimation).
  completion: ChatConversation | undefined
  // Combined token usage (input + output) for this completion.
  usage: number
  // Price of the completion — presumably in cents, matching the `¢` used in
  // the bot's price logging; TODO confirm
  price?: number
  // Tokens consumed by the prompt/conversation sent to the model.
  inputTokens?: number
  // Tokens produced in the model's reply.
  outputTokens?: number
}

export const ChatGPTModels: Record<string, ChatModel> = {
'gpt-4': {
Expand Down

0 comments on commit 28304b7

Please sign in to comment.