diff --git a/src/modules/llms/openaiBot.ts b/src/modules/llms/openaiBot.ts index a761b88..c140fa2 100644 --- a/src/modules/llms/openaiBot.ts +++ b/src/modules/llms/openaiBot.ts @@ -137,6 +137,13 @@ export class OpenAIBot extends LlmsBase { return } + if ( + ctx.hasCommand([SupportedCommands.gpto])) { + this.updateSessionModel(ctx, LlmsModelsEnum.GPT_4O) + await this.onChat(ctx, LlmsModelsEnum.GPT_4O, true, false) + return + } + if (ctx.hasCommand([SupportedCommands.pdf, SupportedCommands.ctx]) && this.checkModel(ctx)) { await this.onChat(ctx, ctx.session.currentModel, true, false) return diff --git a/src/modules/llms/utils/helpers.ts b/src/modules/llms/utils/helpers.ts index 27a3d07..b43fcc8 100644 --- a/src/modules/llms/utils/helpers.ts +++ b/src/modules/llms/utils/helpers.ts @@ -45,6 +45,7 @@ export enum SupportedCommands { new = 'new', gpt4 = 'gpt4', ask32 = 'ask32', + gpto = 'gpto', gpt = 'gpt', last = 'last', dalle = 'dalle', diff --git a/src/modules/llms/utils/types.ts b/src/modules/llms/utils/types.ts index 1eee4b6..b21f936 100644 --- a/src/modules/llms/utils/types.ts +++ b/src/modules/llms/utils/types.ts @@ -10,7 +10,8 @@ export enum LlmsModelsEnum { GPT_4 = 'gpt-4', GPT_35_TURBO = 'gpt-3.5-turbo', GPT_35_TURBO_16K = 'gpt-3.5-turbo-16k', - GPT_4_VISION_PREVIEW = 'gpt-4-vision-preview' + GPT_4_VISION_PREVIEW = 'gpt-4-vision-preview', + GPT_4O = 'gpt-4o-2024-05-13' } export interface DalleGPTModel { @@ -110,6 +111,13 @@ export const LlmsModels: Record = { outputPrice: 0.06, // 6 maxContextTokens: 16000, chargeType: 'TOKEN' + }, + 'gpt-4o-2024-05-13': { + name: 'gpt-4o-2024-05-13', + inputPrice: 0.005, // $5 per 1M input tokens (0.005 per 1K) + outputPrice: 0.0015, // NOTE(review): GPT-4o output is $15 per 1M = 0.015 per 1K; 0.0015 looks 10x too low — confirm before merging + maxContextTokens: 128000, + chargeType: 'TOKEN' } }