Skip to content

Commit

Permalink
fix url llamaSubagent logic + improve user prompt and bot context
Browse files Browse the repository at this point in the history
  • Loading branch information
fegloff committed Apr 13, 2024
1 parent b13f260 commit f455545
Show file tree
Hide file tree
Showing 9 changed files with 101 additions and 95 deletions.
6 changes: 3 additions & 3 deletions src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,9 @@ export default {
}
},
chatGpt: {
chatCompletionContext: `You are an AI Bot powered by Harmony. Your strengths are ai api aggregation for chat,
image, and voice interactions using OpenAI’s chatgpt, Stable Diffusion, and more.
Respond flexibly, but try to stay within 100 words in your response.`,
chatCompletionContext:
'You are an AI Bot powered by Harmony. Your strengths are ai api aggregation for chat, image, and voice interactions. Leveraging a suite of sophisticated subagents, you have the capability to perform tasks such as internet browsing and accessing various services. Your responses should be adaptable to the conversation while maintaining brevity, ideally not exceeding 100 words.',
// 'You are an AI Bot powered dby Harmony. Your strengths are ai api aggregation for chat, image, and voice interactions, and more. You have subagents that helps you with task like browsing the internet, and other services. Respond flexibly, but try to stay within 100 words in all of your responses.',
webCrawlerContext: 'You will receive a web crawling text. Please get keys concepts, but try to stay within 4000 words in your response.',
visionCompletionContext: `You are a concise AI Bot powered by Harmony, capable of providing complete responses within a 100-word limit.
For each additional image, extend your response by 30 words. Your responses should be informative and comprehensive,
Expand Down
24 changes: 16 additions & 8 deletions src/modules/llms/llmsBase.ts
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,6 @@ export abstract class LlmsBase implements PayableBot {
}

protected async runSubagents (ctx: OnMessageContext | OnCallBackQueryData, msg: ChatConversation): Promise<void> {
// const id = ctx.message?.message_id ?? ctx.message?.reply_to_message?.message_thread_id ?? 0
const result = await Promise.all(this.subagents.map(async (agent: SubagentBase) =>
await agent.run(ctx, msg)))
const agentsCompletion = result.filter(agent => agent.status === SubagentStatus.PROCESSING)
Expand Down Expand Up @@ -131,7 +130,7 @@ export abstract class LlmsBase implements PayableBot {
const msg = {
id: ctx.message?.message_id ?? ctx.message?.message_thread_id ?? 0,
model,
content: await preparePrompt(ctx, prompt as string),
content: prompt as string ?? '', // await preparePrompt(ctx, prompt as string),
numSubAgents: supportedAgents
}
await this.runSubagents(ctx, msg) // prompt as string)
Expand All @@ -151,7 +150,7 @@ export abstract class LlmsBase implements PayableBot {
}
}
}))
const agentsCompletion = SubagentBase.getSubagents(ctx, msg.id ?? 0)
const agentsCompletion = SubagentBase.getRunningSubagents(ctx, msg.id ?? 0)
if (agentsCompletion && agentsCompletion.length > 0) {
session.requestQueue.push(msg)
if (!session.isProcessingQueue) {
Expand All @@ -170,6 +169,7 @@ export abstract class LlmsBase implements PayableBot {
const msg = session.requestQueue.shift()
const prompt = msg?.content as string
const model = msg?.model
let agentCompletions: string[] = []
const { chatConversation } = session
const minBalance = await getMinBalance(ctx, msg?.model as LlmsModelsEnum)
let enhancedPrompt = ''
Expand All @@ -189,16 +189,24 @@ export abstract class LlmsBase implements PayableBot {
return
}
if (msg?.numSubAgents && msg?.numSubAgents > 0 && msg.id) {
const agents = SubagentBase.getSubagents(ctx, msg.id)
const agents = SubagentBase.getRunningSubagents(ctx, msg.id)
if (agents) {
const agentCompletions = agents.map((agent: SubagentResult) => agent.completion)
enhancedPrompt = prompt.concat(...agentCompletions)
console.log(enhancedPrompt)
SubagentBase.deleteCompletion(ctx, msg.id)
agentCompletions = agents.map((agent: SubagentResult) => agent.completion + `${'\n'}`)
enhancedPrompt = ''.concat(...agentCompletions)
enhancedPrompt += prompt
this.logger.info(`Enhanced prompt: ${enhancedPrompt}`)
SubagentBase.deleteRunningSubagents(ctx, msg.id)
} else {
continue
}
}
if (chatConversation.length === 0) {
chatConversation.push({
role: 'system',
content: config.openAi.chatGpt.chatCompletionContext
})
}
// const hasCode = hasCodeSnippet(ctx)
const chat: ChatConversation = {
content: enhancedPrompt || prompt,
role: 'user',
Expand Down
31 changes: 2 additions & 29 deletions src/modules/llms/openaiBot.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ import {
hasDallePrefix,
hasNewPrefix,
isMentioned,
PRICE_ADJUSTMENT,
sendMessage,
SupportedCommands
} from './utils/helpers'
Expand All @@ -23,11 +22,9 @@ import { now } from '../../utils/perf'
import { appText } from '../../utils/text'
import {
chatCompletion,
getDalleModel,
getDalleModelPrice,
streamChatCompletion
} from './api/openai'
import { LlamaAgent } from '../subagents/llamaAgent'
import { LlamaAgent } from '../subagents'

export class OpenAIBot extends LlmsBase {
constructor (payments: BotPayments) {
Expand All @@ -40,31 +37,6 @@ export class OpenAIBot extends LlmsBase {

public getEstimatedPrice (ctx: any): number {
try {
const prompts = ctx.match
// if (this.isSupportedImageReply(ctx) && !isNaN(+prompts)) {
// const imageNumber = ctx.message?.caption || ctx.message?.text
// const imageSize = ctx.session.openAi.imageGen.imgSize
// const model = getDalleModel(imageSize)
// const price = getDalleModelPrice(model, true, imageNumber) // cents
// return price * priceAdjustment
// }
if (!prompts) {
return 0
}
if (
ctx.hasCommand([
SupportedCommands.dalle,
SupportedCommands.dalleImg,
SupportedCommands.dalleShort,
SupportedCommands.dalleShorter
])
) {
const imageNumber = ctx.session.openAi.imageGen.numImages
const imageSize = ctx.session.openAi.imageGen.imgSize
const model = getDalleModel(imageSize)
const price = getDalleModelPrice(model, true, imageNumber) // cents
return price * PRICE_ADJUSTMENT
}
return 0
} catch (e) {
Sentry.captureException(e)
Expand Down Expand Up @@ -177,6 +149,7 @@ export class OpenAIBot extends LlmsBase {
if (ctx.hasCommand(SupportedCommands.ask32)) {
session.model = LlmsModelsEnum.GPT_4_32K
await this.onChat(ctx, LlmsModelsEnum.GPT_4_32K, true)
return
}

if (ctx.hasCommand(SupportedCommands.last)) {
Expand Down
29 changes: 20 additions & 9 deletions src/modules/llms/utils/helpers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -77,19 +77,25 @@ export const isMentioned = (
return false
}

export const getUrlFromText = (ctx: OnMessageContext | OnCallBackQueryData): string[] | undefined => {
const entities = ctx.message?.entities ? ctx.message?.entities : ctx.message?.reply_to_message?.entities
const text = ctx.message?.text ? ctx.message?.text : ctx.message?.reply_to_message?.text
if (entities && text) {
const urlEntity = entities.filter(e => e.type === 'url')
if (urlEntity.length > 0) {
const urls = urlEntity.map(e => text.slice(e.offset, e.offset + e.length))
return urls
}
export const getMsgEntities = (ctx: OnMessageContext | OnCallBackQueryData, filter: string): string[] | undefined => {
  // Slice every entity of the requested type (e.g. 'url') out of the message
  // text; when the message itself yields none, fall back to the message it
  // replies to. Returns undefined when neither contains a matching entity.
  const fromMsg = ctx.message?.entities?.filter(e => e.type === filter)
  const msgText = ctx.message?.text
  if (msgText && fromMsg && fromMsg.length > 0) {
    return fromMsg.map(e => msgText.slice(e.offset, e.offset + e.length))
  }
  // NOTE(review): entities are read from ctx.update.message but the text from
  // ctx.message — presumably equivalent for plain message updates; confirm the
  // two cannot diverge for callback-query contexts.
  const fromReply = ctx.update.message?.reply_to_message?.entities?.filter(e => e.type === filter)
  const replyText = ctx.message?.reply_to_message?.text
  if (replyText && fromReply && fromReply.length > 0) {
    return fromReply.map(e => replyText.slice(e.offset, e.offset + e.length))
  }
  return undefined
}

// Convenience wrapper: the URL entities found in the message (or its reply),
// or undefined when there are none.
export const getUrlFromText = (ctx: OnMessageContext | OnCallBackQueryData): string[] | undefined => {
  return getMsgEntities(ctx, 'url')
}

export const promptHasBadWords = (prompt: string): boolean => {
const lowerCasePrompt = prompt.toLowerCase()

Expand Down Expand Up @@ -334,3 +340,8 @@ export const getMinBalance = async (ctx: OnMessageContext | OnCallBackQueryData,
}, false)
return minBalance.price
}

// True when the message carries at least one 'pre' entity — Telegram's marker
// for preformatted text, i.e. a code snippet.
export const hasCodeSnippet = (ctx: OnMessageContext | OnCallBackQueryData): boolean => {
  return ctx.entities('pre').length > 0
}
2 changes: 1 addition & 1 deletion src/modules/llms/vertexBot.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ import { LlmsModelsEnum } from './utils/types'

import { LlmsBase } from './llmsBase'
import { vertexCompletion, vertexStreamCompletion } from './api/vertex'
import { LlamaAgent } from '../subagents/llamaAgent'
import { LlamaAgent } from '../subagents'
export class VertexBot extends LlmsBase {
constructor (payments: BotPayments) {
super(payments, 'VertexBot', 'llms')
Expand Down
2 changes: 2 additions & 0 deletions src/modules/subagents/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
export * from './subagentBase'
export * from './llamaSubagent'
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { type BotPayments } from '../payment'
import { now } from '../../utils/perf'
import { SubagentBase } from './subagentBase'
import { getUrlFromText } from '../llms/utils/helpers'
import { getMsgEntities } from '../llms/utils/helpers'
import { llmAddUrlDocument, llmCheckCollectionStatus, queryUrlDocument } from '../llms/api/llmApi'
import { ErrorHandler } from '../errorhandler'
import { sleep } from '../sd-images/utils'
Expand Down Expand Up @@ -60,6 +60,7 @@ export class LlamaAgent extends SubagentBase {
})
}
} else {
collection.agentId = id
await this.queryUrlCollection(ctx, urls[0], msg.content as string)
}
const agent: SubagentResult = {
Expand Down Expand Up @@ -117,7 +118,10 @@ export class LlamaAgent extends SubagentBase {
}

public isSupportedUrl (ctx: OnMessageContext | OnCallBackQueryData): string[] | undefined {
return getUrlFromText(ctx)
if (ctx.chat?.type === 'private') {
return getMsgEntities(ctx, 'url')
}
return undefined
}

private async addUrlToCollection (ctx: OnMessageContext | OnCallBackQueryData, chatId: number, url: string, prompt: string): Promise<void> {
Expand Down Expand Up @@ -167,22 +171,22 @@ export class LlamaAgent extends SubagentBase {
})
}

public async onPdfReplyHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
try {
const fileName = this.isSupportedPdfReply(ctx)
const prompt = ctx.message?.text ?? 'Summarize this context'
if (fileName !== '') {
const collection = ctx.session.collections.activeCollections.find(c => c.fileName === fileName)
if (collection) {
await this.queryUrlCollection(ctx, collection.url, prompt)
}
}
ctx.transient.analytics.actualResponseTime = now()
} catch (e: any) {
this.logger.error(`onPdfReplyHandler error: ${e}`)
throw e
}
}
// public async onPdfReplyHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
// try {
// const fileName = this.isSupportedPdfReply(ctx)
// const prompt = ctx.message?.text ?? 'Summarize this context'
// if (fileName !== '') {
// const collection = ctx.session.collections.activeCollections.find(c => c.fileName === fileName)
// if (collection) {
// await this.queryUrlCollection(ctx, collection.url, prompt)
// }
// }
// ctx.transient.analytics.actualResponseTime = now()
// } catch (e: any) {
// this.logger.error(`onPdfReplyHandler error: ${e}`)
// throw e
// }
// }

private getCollectionConversation (ctx: OnMessageContext | OnCallBackQueryData, collection: Collection): ChatConversation[] {
if (ctx.session.collections.currentCollection === collection.collectionName) {
Expand Down Expand Up @@ -217,26 +221,27 @@ export class LlamaAgent extends SubagentBase {
}
}

async onUrlReplyHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
try {
const url = getUrlFromText(ctx)
if (url) {
const prompt = ctx.message?.text ?? 'summarize'
const collection = ctx.session.collections.activeCollections.find(c => c.url === url[0])
const newPrompt = `${prompt}` // ${url}
if (collection) {
await this.queryUrlCollection(ctx, url[0], newPrompt)
}
ctx.transient.analytics.actualResponseTime = now()
}
} catch (e: any) {
this.logger.error(`onUrlReplyHandler: ${e.toString()}`)
throw e
}
}
// async onUrlReplyHandler (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
// try {
// const url = getUrlFromText(ctx)
// if (url) {
// const prompt = ctx.message?.text ?? 'summarize'
// const collection = ctx.session.collections.activeCollections.find(c => c.url === url[0])
// const newPrompt = `${prompt}` // ${url}
// if (collection) {
// await this.queryUrlCollection(ctx, url[0], newPrompt)
// }
// ctx.transient.analytics.actualResponseTime = now()
// }
// } catch (e: any) {
// this.logger.error(`onUrlReplyHandler: ${e.toString()}`)
// throw e
// }
// }

async onCheckCollectionStatus (ctx: OnMessageContext | OnCallBackQueryData): Promise<void> {
const processingTime = config.llms.processingTime
const session = this.getSession(ctx)
while (ctx.session.collections.collectionRequestQueue.length > 0) {
try {
const collection = ctx.session.collections.collectionRequestQueue.shift()
Expand Down Expand Up @@ -288,7 +293,7 @@ export class LlamaAgent extends SubagentBase {
}
await ctx.api.editMessageText(ctx.chat?.id ?? '', collection.msgId, statusMsg,
{ link_preview_options: { is_disabled: true } })
ctx.session.subagents.running.push({
session.running.push({
id: collection.agentId ?? 0,
name: this.name,
completion: '',
Expand Down Expand Up @@ -341,7 +346,7 @@ export class LlamaAgent extends SubagentBase {
session.running.push({
id: collection.agentId ?? 0,
name: this.name,
completion: this.completionContext.replace('%COMPLETION%', response.completion),
completion: this.completionContext.replace('%AGENT_OUTPUT%', response.completion).replace('%URL%', collection.url),
status: SubagentStatus.DONE
})
}
Expand Down
15 changes: 8 additions & 7 deletions src/modules/subagents/subagentBase.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import {
type BotSessionData,
type SubagentSessionData,
type SubagentResult,
SubagentStatus,
type ChatConversation
} from '../types'
import { appText } from '../../utils/text'
Expand Down Expand Up @@ -66,11 +65,11 @@ export abstract class SubagentBase implements PayableBot {
return (ctx.session[this.sessionDataKey as keyof BotSessionData] as SubagentSessionData)
}

public static deleteCompletion (ctx: OnMessageContext | OnCallBackQueryData, id: number): void {
ctx.session.subagents.running = ctx.session.subagents.running.filter(agent => agent.id === id)
public static deleteRunningSubagents (ctx: OnMessageContext | OnCallBackQueryData, id: number): void {
ctx.session.subagents.running = ctx.session.subagents.running.filter(agent => agent.id !== id)
}

public static getSubagents (ctx: OnMessageContext | OnCallBackQueryData, id: number): SubagentResult[] | undefined {
public static getRunningSubagents (ctx: OnMessageContext | OnCallBackQueryData, id: number): SubagentResult[] | undefined {
return ctx.session.subagents.running.filter(agent => agent.id === id)
}

Expand All @@ -81,12 +80,14 @@ export abstract class SubagentBase implements PayableBot {
const agent = session.subagentsRequestQueue.shift()
if (agent) {
const result = await this.checkStatus(ctx, agent)
if (!result || result.status === SubagentStatus.PROCESSING) {
if (!result) { // || result.status === SubagentStatus.PROCESSING) {
session.subagentsRequestQueue.push(agent)
await sleep(3000)
} else {
session.running.push(agent)
}
// else {
// console.log('onCheckAgentStatus', agent)
// session.running.push(agent)
// }
}
ctx.transient.analytics.actualResponseTime = now()
} catch (e: any) {
Expand Down
8 changes: 7 additions & 1 deletion src/utils/text.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,13 @@ Adjust image size or how many images are generated`,
'Your credits: $CREDITS ONE tokens. To recharge, send ONE to `$WALLET_ADDRESS`',
maliciousPrompt:
'Your prompt has been flagged for potentially generating illegal or malicious content. If you believe there has been a mistake, please reach out to support.',
llamaURLContext: 'This is URL handler subagent completion: %COMPLETION%'
llamaURLContext: 'Based on the information gathered from the URL (%URL%): %AGENT_OUTPUT%'
// financialContext: 'Based of the financial data from this company (%COMPANY%): %AGENT_OUTPUT%'
// please provide a detailed explanation:

// %AGENT_OUTPUT%`
// '''
// 'This is the web crawler context of the given URL(https://deepmind.google/technologies/gemini/#gemini-1.0): %COMPLETION%'
}

// <b>Edit an Image</b>
Expand Down

0 comments on commit f455545

Please sign in to comment.