diff --git a/modules/ai/chat.ts b/modules/ai/chat.ts
index c3b070f..a369dc7 100644
--- a/modules/ai/chat.ts
+++ b/modules/ai/chat.ts
@@ -6,7 +6,7 @@
 import { createFailResponse } from '../backend-node';
 import { handleRequestError } from '../session/api-helper';
 import { countToken, countTokenForString } from './encoding';
-import { ChatModel, DEFAULT_MAX_TOKEN, MAX_TOKENS, ChatOptions, ErrorResponse } from './constants';
+import { ChatModel, DEFAULT_MAX_TOKEN, MAX_TOKENS, ChatOptions, ErrorResponse, ModelPointer } from './constants';
 import { ChatCompletion, ChatCompletionInStream } from './types';
 import { getChatCompletionSupport } from './platform';
 import { checkRequest, RequestControlError } from './request-control';
@@ -68,7 +68,7 @@ export async function chat(options: ChatOptions) {
 
   const content = {
     stream: true,
-    model,
+    model: ModelPointer[model] ?? model,
     user: support.user,
     // max_tokens: maxToken - token,
     ...other,
diff --git a/modules/ai/constants.ts b/modules/ai/constants.ts
index 628e1ce..915bcf1 100644
--- a/modules/ai/constants.ts
+++ b/modules/ai/constants.ts
@@ -98,6 +98,14 @@ export enum ChatModel {
   GPT_4_32K = 'gpt-4-32k',
 }
 
+/**
+ * ๆจกๅž‹ๆŒ‡้’ˆ
+ */
+export const ModelPointer: Record<string, string> = {
+  [ChatModel.GPT_4]: 'gpt-4-1106-preview',
+  [ChatModel.GPT_4_32K]: 'gpt-4-1106-preview',
+};
+
 export const ALL_SUPPORTED_CHAT_MODEL = [
   ChatModel.GPT3_5_TURBO,
   ChatModel.GPT3_5_TURBO_16K,