Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enable vision command on group chats + code snippet detection #350

Merged
merged 8 commits into from
Jan 23, 2024
21 changes: 6 additions & 15 deletions src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ export default {
model: 'chat-bison',
minimumBalance: 0,
isEnabled: Boolean(parseInt(process.env.LLMS_ENABLED ?? '1')),
prefixes: { bardPrefix: ['b.', 'B.'] },
pdfUrl: process.env.PDF_URL ?? '',
processingTime: 300000
},
Expand All @@ -65,30 +64,25 @@ export default {
image, and voice interactions using OpenAI’s chatgpt, Stable Diffusion, and more.
Respond flexibly, but try to stay within 100 words in your response.`,
webCrawlerContext: 'You will receive a web crawling text. Please get key concepts, but try to stay within 4000 words in your response.',
visionCompletionContext: `You are a concise AI Bot powered by Harmony, capable of providing complete responses within a 100-word limit.
For each additional image, extend your response by 30 words. Your responses should be informative and comprehensive,
wrapping up all details without leaving them hanging. Use your flexibility to adapt to any topic, and deliver engaging and fulfilling
conversations in a succinct manner.`,
maxTokens: parseInt(process.env.OPENAI_MAX_TOKENS ?? '800'), // telegram messages has a char limit
wordLimit: 30,
wordCountBetween: 10,
// process.env.WORD_COUNT_BETWEEN
// ? parseInt(process.env.WORD_COUNT_BETWEEN)
// : 10,
priceAdjustment: process.env.PRICE_ADJUSTMENT
? parseInt(process.env.PRICE_ADJUSTMENT)
? parseFloat(process.env.PRICE_ADJUSTMENT)
: 2,
isFreePromptChatGroups: false,
isEnabled: Boolean(parseInt(process.env.CHAT_GPT_ENABLED ?? '1')),
isTypingEnabled: Boolean(
parseInt(process.env.TYPING_STATUS_ENABLED ?? '1')
),
model: process.env.OPENAI_MODEL ?? 'gpt-3.5-turbo',
prefixes: {
chatPrefix: process.env.ASK_PREFIX
? process.env.ASK_PREFIX.split(',')
: ['a.', '.'], // , "?", ">",
newPrefix: process.env.NEW_PREFIX
? process.env.NEW_PREFIX.split(',')
: ['n.', '..'],
llamaPrefix: ['*']
},
minimumBalance: parseInt(process.env.MIN_BALANCE ?? '0')
}
},
Expand All @@ -101,10 +95,7 @@ export default {
defaultRPC: 'https://api.harmony.one',
restrictedPhrases: process.env.RESTRICTED_PHRASES
? process.env.RESTRICTED_PHRASES.split(', ')
: ['metamask', 'walletconnect'],
registerPrefix: process.env.COUNTRY_PREFIX
? process.env.COUNTRY_PREFIX.split(',')
: ['+', '%']
: ['metamask', 'walletconnect']
},
voiceMemo: {
isEnabled: Boolean(parseInt(process.env.VOICE_MEMO_ENABLED ?? '1')),
Expand Down
41 changes: 16 additions & 25 deletions src/modules/1country/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,30 +11,21 @@ import { type OnMessageContext, type OnCallBackQueryData, type PayableBot, Reque
import { type BotPayments } from '../payment'
import { getCommandNamePrompt, getUrl } from './utils/'
import { isAdmin } from '../open-ai/utils/context'
import config from '../../config'
import { MAX_TRIES, sendMessage } from '../open-ai/helpers'
import { sleep } from '../sd-images/utils'
import { isValidUrl } from '../open-ai/utils/web-crawler'
import { now } from '../../utils/perf'

export const SupportedCommands = {
register: { name: 'rent' },
visit: { name: 'visit' },
check: { name: 'check' },
cert: { name: 'cert' },
nft: { name: 'nft' },
set: { name: 'set' }
export enum SupportedCommands {
register = 'rent',
visit = 'visit',
check = 'check',
cert = 'cert',
nft = 'nft',
set = 'set'
}

// enum SupportedCommands {
// CHECK = "check",
// NFT = "nft",
// VISIT = "visit",
// CERT = "cert",
// RENEW = "renew",
// NOTION = "notion",
// SUBDOMAIN = "subdomain",
// }
const COUNTRY_PREFIX_LIST = ['+', '%']

export class OneCountryBot implements PayableBot {
public readonly module = 'OneCountryBot'
Expand All @@ -58,7 +49,7 @@ export class OneCountryBot implements PayableBot {
ctx: OnMessageContext | OnCallBackQueryData
): boolean {
const hasCommand = ctx.hasCommand(
Object.values(SupportedCommands).map((command) => command.name)
Object.values(SupportedCommands).map((command) => command)
)
const hasPrefix = this.hasPrefix(ctx.message?.text ?? '')
if (hasPrefix && ctx.session.oneCountry.lastDomain) {
Expand All @@ -68,7 +59,7 @@ export class OneCountryBot implements PayableBot {
}

private hasPrefix (prompt: string): boolean {
const prefixList = config.country.registerPrefix
const prefixList = COUNTRY_PREFIX_LIST
for (let i = 0; i < prefixList.length; i++) {
if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
return true
Expand All @@ -88,17 +79,17 @@ export class OneCountryBot implements PayableBot {
return
}

if (ctx.hasCommand(SupportedCommands.visit.name)) {
if (ctx.hasCommand(SupportedCommands.visit)) {
await this.onVistitCmd(ctx)
return
}

if (ctx.hasCommand(SupportedCommands.check.name)) {
if (ctx.hasCommand(SupportedCommands.check)) {
await this.onCheckCmd(ctx)
return
}

if (ctx.hasCommand(SupportedCommands.register.name)) {
if (ctx.hasCommand(SupportedCommands.register)) {
await this.onRegister(ctx)
return
}
Expand All @@ -108,17 +99,17 @@ export class OneCountryBot implements PayableBot {
return
}

if (ctx.hasCommand(SupportedCommands.nft.name)) {
if (ctx.hasCommand(SupportedCommands.nft)) {
await this.onNftCmd(ctx)
return
}

if (ctx.hasCommand(SupportedCommands.cert.name)) {
if (ctx.hasCommand(SupportedCommands.cert)) {
await this.onCertCmd(ctx)
return
}

if (ctx.hasCommand(SupportedCommands.set.name)) {
if (ctx.hasCommand(SupportedCommands.set)) {
await this.onSet(ctx)
return
}
Expand Down
22 changes: 11 additions & 11 deletions src/modules/llms/helpers.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import config from '../../config'
import {
type OnMessageContext,
type OnCallBackQueryData,
Expand All @@ -7,21 +6,22 @@ import {
type ChatPayload
} from '../types'
import { type ParseMode } from 'grammy/types'
// import { getChatModel, getChatModelPrice, getTokenNumber } from "./api/openAi";
import { LlmsModelsEnum } from './types'
import { type Message } from 'grammy/out/types'
import { llmAddUrlDocument } from './api/llmApi'

export const SupportedCommands = {
bardF: { name: 'bard' },
bard: { name: 'b' },
j2Ultra: { name: 'j2-ultra' },
sum: { name: 'sum' },
ctx: { name: 'ctx' },
pdf: { name: 'pdf' }
export enum SupportedCommands {
bardF = 'bard',
bard = 'b',
j2Ultra = 'j2-ultra',
sum = 'sum',
ctx = 'ctx',
pdf = 'pdf'
}

export const MAX_TRIES = 3
const LLAMA_PREFIX_LIST = ['*']
const BARD_PREFIX_LIST = ['b.', 'B.']

export const isMentioned = (
ctx: OnMessageContext | OnCallBackQueryData
Expand All @@ -40,7 +40,7 @@ export const isMentioned = (
}

export const hasLlamaPrefix = (prompt: string): string => {
const prefixList = config.openAi.chatGpt.prefixes.llamaPrefix
const prefixList = LLAMA_PREFIX_LIST
for (let i = 0; i < prefixList.length; i++) {
if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
return prefixList[i]
Expand All @@ -50,7 +50,7 @@ export const hasLlamaPrefix = (prompt: string): string => {
}

export const hasBardPrefix = (prompt: string): string => {
const prefixList = config.llms.prefixes.bardPrefix
const prefixList = BARD_PREFIX_LIST
for (let i = 0; i < prefixList.length; i++) {
if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
return prefixList[i]
Expand Down
12 changes: 6 additions & 6 deletions src/modules/llms/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ export class LlmsBot implements PayableBot {
ctx: OnMessageContext | OnCallBackQueryData
): boolean {
const hasCommand = ctx.hasCommand(
Object.values(SupportedCommands).map((command) => command.name)
Object.values(SupportedCommands).map((command) => command)
)
if (isMentioned(ctx)) {
return true
Expand Down Expand Up @@ -117,12 +117,12 @@ export class LlmsBot implements PayableBot {
return
}

if (ctx.hasCommand(SupportedCommands.pdf.name)) {
if (ctx.hasCommand(SupportedCommands.pdf)) {
await this.onPdfCommand(ctx)
return
}

if (ctx.hasCommand(SupportedCommands.bard.name) || ctx.hasCommand(SupportedCommands.bardF.name)) {
if (ctx.hasCommand(SupportedCommands.bard) || ctx.hasCommand(SupportedCommands.bardF)) {
await this.onChat(ctx, LlmsModelsEnum.BISON)
return
}
Expand All @@ -142,17 +142,17 @@ export class LlmsBot implements PayableBot {
return
}

if (ctx.hasCommand(SupportedCommands.j2Ultra.name)) {
if (ctx.hasCommand(SupportedCommands.j2Ultra)) {
await this.onChat(ctx, LlmsModelsEnum.J2_ULTRA)
return
}

if (ctx.hasCommand(SupportedCommands.ctx.name)) {
if (ctx.hasCommand(SupportedCommands.ctx)) {
await this.onCurrentCollection(ctx)
return
}

if (ctx.hasCommand(SupportedCommands.sum.name) ||
if (ctx.hasCommand(SupportedCommands.sum) ||
(ctx.message?.text?.startsWith('sum ') && ctx.chat?.type === 'private')
) {
await this.onSum(ctx)
Expand Down
10 changes: 7 additions & 3 deletions src/modules/open-ai/api/openAi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,10 @@ export const streamChatVisionCompletion = async (
const payload: any = {
model,
messages: [
{
role: 'system',
content: config.openAi.chatGpt.visionCompletionContext
},
{
role: 'user',
content: [
Expand Down Expand Up @@ -277,11 +281,11 @@ export const getChatModelPrice = (
model: ChatModel,
inCents = true,
inputTokens: number,
outPutTokens?: number
outputTokens?: number
): number => {
let price = model.inputPrice * inputTokens
price += outPutTokens
? outPutTokens * model.outputPrice
price += outputTokens
? outputTokens * model.outputPrice
: model.maxContextTokens * model.outputPrice
price = inCents ? price * 100 : price
return price / 1000
Expand Down
59 changes: 33 additions & 26 deletions src/modules/open-ai/helpers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,30 +4,31 @@ import { type ParseMode } from 'grammy/types'
import { getChatModel, getChatModelPrice, getTokenNumber } from './api/openAi'
import { type Message, type InlineKeyboardMarkup } from 'grammy/out/types'
import { isValidUrl } from './utils/web-crawler'
// import { llmAddUrlDocument } from '../llms/api/llmApi'

export const SupportedCommands = {
chat: { name: 'chat' },
ask: { name: 'ask' },
vision: { name: 'vision' },
ask35: { name: 'ask35' },
new: { name: 'new' },
gpt4: { name: 'gpt4' },
ask32: { name: 'ask32' },
gpt: { name: 'gpt' },
last: { name: 'last' },
dalle: { name: 'dalle' },
dalleImg: { name: 'image' },
dalleShort: { name: 'img' },
dalleShorter: { name: 'i' },
genImgEn: { name: 'genImgEn' },
on: { name: 'on' },
off: { name: 'off' }
export enum SupportedCommands {
chat = 'chat',
ask = 'ask',
vision = 'vision',
ask35 = 'ask35',
new = 'new',
gpt4 = 'gpt4',
ask32 = 'ask32',
gpt = 'gpt',
last = 'last',
dalle = 'dalle',
dalleImg = 'image',
dalleShort = 'img',
dalleShorter = 'i',
genImgEn = 'genImgEn',
on = 'on',
off = 'off'
}

export const MAX_TRIES = 3

const DALLE_PREFIX_LIST = ['i. ', ',', 'image ', 'd.', 'img ']
export const DALLE_PREFIX_LIST = ['i. ', ',', 'image ', 'd.', 'img ']
export const CHAT_GPT_PREFIX_LIST = ['a.', '.']
export const NEW_PREFIX_LIST = ['n.', '..']

export const isMentioned = (
ctx: OnMessageContext | OnCallBackQueryData
Expand All @@ -46,7 +47,7 @@ export const isMentioned = (
}

export const hasChatPrefix = (prompt: string): string => {
const prefixList = config.openAi.chatGpt.prefixes.chatPrefix
const prefixList = CHAT_GPT_PREFIX_LIST
for (let i = 0; i < prefixList.length; i++) {
if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
return prefixList[i]
Expand All @@ -66,7 +67,7 @@ export const hasDallePrefix = (prompt: string): string => {
}

export const hasNewPrefix = (prompt: string): string => {
const prefixList = config.openAi.chatGpt.prefixes.newPrefix
const prefixList = NEW_PREFIX_LIST
for (let i = 0; i < prefixList.length; i++) {
if (prompt.toLocaleLowerCase().startsWith(prefixList[i])) {
return prefixList[i]
Expand All @@ -88,6 +89,11 @@ const hasUrlPrompt = (prompt: string): string => {
return url
}

export const hasCodeSnippet = (ctx: OnMessageContext | OnCallBackQueryData): boolean => {
const entities = ctx.entities('pre') // pre => code snippets
return entities.length > 0
}

export const hasUrl = (
ctx: OnMessageContext | OnCallBackQueryData,
prompt: string
Expand Down Expand Up @@ -232,23 +238,24 @@ export const hasPrefix = (prompt: string): string => {
)
}

export const getPromptPrice = (completion: string, data: ChatPayload): { price: number, promptTokens: number, completionTokens: number } => {
export const getPromptPrice = (completion: string, data: ChatPayload): { price: number, promptTokens: number, completionTokens: number, totalTokens: number } => {
const { conversation, ctx, model } = data

const currentUsage = data.prompt ? 0 : ctx.session.openAi.chatGpt.usage
const prompt = data.prompt ? data.prompt : conversation[conversation.length - 1].content
const promptTokens = getTokenNumber(prompt as string)
const promptTokens = getTokenNumber(prompt as string) + currentUsage
const completionTokens = getTokenNumber(completion)
const modelPrice = getChatModel(model)
const price =
getChatModelPrice(modelPrice, true, promptTokens, completionTokens) *
config.openAi.chatGpt.priceAdjustment
conversation.push({ content: completion, role: 'system' })
ctx.session.openAi.chatGpt.usage += promptTokens + completionTokens
ctx.session.openAi.chatGpt.usage += completionTokens
ctx.session.openAi.chatGpt.price += price
return {
price,
promptTokens,
completionTokens
completionTokens,
totalTokens: data.prompt ? promptTokens + completionTokens : ctx.session.openAi.chatGpt.usage
}
}

Expand Down
Loading