diff --git a/front/components/providers/types.ts b/front/components/providers/types.ts index 7fb5e4bebe57..72611bbfc253 100644 --- a/front/components/providers/types.ts +++ b/front/components/providers/types.ts @@ -10,6 +10,8 @@ import { CLAUDE_3_5_HAIKU_DEFAULT_MODEL_CONFIG, CLAUDE_3_5_SONNET_DEFAULT_MODEL_CONFIG, DEEPSEEK_CHAT_MODEL_CONFIG, + GEMINI_2_FLASH_PREVIEW_MODEL_CONFIG, + GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_CONFIG, GEMINI_FLASH_DEFAULT_MODEL_CONFIG, GEMINI_PRO_DEFAULT_MODEL_CONFIG, GPT_4_TURBO_MODEL_CONFIG, @@ -53,6 +55,8 @@ export const USED_MODEL_CONFIGS: readonly ModelConfig[] = [ MISTRAL_CODESTRAL_MODEL_CONFIG, GEMINI_PRO_DEFAULT_MODEL_CONFIG, GEMINI_FLASH_DEFAULT_MODEL_CONFIG, + GEMINI_2_FLASH_PREVIEW_MODEL_CONFIG, + GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_CONFIG, TOGETHERAI_LLAMA_3_3_70B_INSTRUCT_TURBO_MODEL_CONFIG, TOGETHERAI_QWEN_2_5_CODER_32B_INSTRUCT_MODEL_CONFIG, TOGETHERAI_QWEN_32B_PREVIEW_MODEL_CONFIG, diff --git a/front/pages/api/w/[wId]/providers/[pId]/models.ts b/front/pages/api/w/[wId]/providers/[pId]/models.ts index a10f05446bbc..ae20245dcf68 100644 --- a/front/pages/api/w/[wId]/providers/[pId]/models.ts +++ b/front/pages/api/w/[wId]/providers/[pId]/models.ts @@ -1,4 +1,10 @@ import type { WithAPIErrorResponse } from "@dust-tt/types"; +import { + GEMINI_1_5_FLASH_LATEST_MODEL_ID, + GEMINI_1_5_PRO_LATEST_MODEL_ID, + GEMINI_2_FLASH_PREVIEW_MODEL_ID, + GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_ID, +} from "@dust-tt/types"; import type { NextApiRequest, NextApiResponse } from "next"; import { withSessionAuthenticationForWorkspace } from "@app/lib/api/auth_wrappers"; @@ -237,8 +243,10 @@ async function handler( case "google_ai_studio": return res.status(200).json({ models: [ - { id: "gemini-1.5-flash-latest" }, - { id: "gemini-1.5-pro-latest" }, + { id: GEMINI_1_5_FLASH_LATEST_MODEL_ID }, + { id: GEMINI_1_5_PRO_LATEST_MODEL_ID }, + { id: GEMINI_2_FLASH_PREVIEW_MODEL_ID }, + { id: GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_ID }, ], }); diff --git 
a/sdks/js/src/types.ts b/sdks/js/src/types.ts index cdeb639d3268..c9a9928b6090 100644 --- a/sdks/js/src/types.ts +++ b/sdks/js/src/types.ts @@ -43,6 +43,8 @@ const ModelLLMIdSchema = FlexibleEnumSchema< | "codestral-latest" | "gemini-1.5-pro-latest" | "gemini-1.5-flash-latest" + | "gemini-2.0-flash-exp" + | "gemini-2.0-flash-thinking-exp-1219" | "meta-llama/Llama-3.3-70B-Instruct-Turbo" | "Qwen/Qwen2.5-Coder-32B-Instruct" | "Qwen/QwQ-32B-Preview" @@ -666,6 +668,7 @@ const WhitelistableFeaturesSchema = FlexibleEnumSchema< | "openai_o1_custom_assistants_feature" | "openai_o1_high_reasoning_custom_assistants_feature" | "deepseek_feature" + | "google_ai_studio_experimental_models_feature" | "snowflake_connector_feature" | "index_private_slack_channel" | "conversations_jit_actions" diff --git a/types/src/front/lib/assistant.ts b/types/src/front/lib/assistant.ts index 6a5bdd1414c4..d5947cd06d75 100644 --- a/types/src/front/lib/assistant.ts +++ b/types/src/front/lib/assistant.ts @@ -114,6 +114,9 @@ export const MISTRAL_CODESTRAL_MODEL_ID = "codestral-latest" as const; export const GEMINI_1_5_PRO_LATEST_MODEL_ID = "gemini-1.5-pro-latest" as const; export const GEMINI_1_5_FLASH_LATEST_MODEL_ID = "gemini-1.5-flash-latest" as const; +export const GEMINI_2_FLASH_PREVIEW_MODEL_ID = "gemini-2.0-flash-exp" as const; +export const GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_ID = + "gemini-2.0-flash-thinking-exp-1219" as const; export const TOGETHERAI_LLAMA_3_3_70B_INSTRUCT_TURBO_MODEL_ID = "meta-llama/Llama-3.3-70B-Instruct-Turbo" as const; export const TOGETHERAI_QWEN_2_5_CODER_32B_INSTRUCT_MODEL_ID = @@ -145,6 +148,8 @@ export const MODEL_IDS = [ MISTRAL_CODESTRAL_MODEL_ID, GEMINI_1_5_PRO_LATEST_MODEL_ID, GEMINI_1_5_FLASH_LATEST_MODEL_ID, + GEMINI_2_FLASH_PREVIEW_MODEL_ID, + GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_ID, TOGETHERAI_LLAMA_3_3_70B_INSTRUCT_TURBO_MODEL_ID, TOGETHERAI_QWEN_2_5_CODER_32B_INSTRUCT_MODEL_ID, TOGETHERAI_QWEN_32B_PREVIEW_MODEL_ID, @@ -570,6 +575,39 @@ export const 
GEMINI_FLASH_DEFAULT_MODEL_CONFIG: ModelConfigurationType = { supportsVision: false, }; +export const GEMINI_2_FLASH_PREVIEW_MODEL_CONFIG: ModelConfigurationType = { + providerId: "google_ai_studio", + modelId: GEMINI_2_FLASH_PREVIEW_MODEL_ID, + displayName: "Gemini Flash 2.0", + contextSize: 1_000_000, + recommendedTopK: 64, + recommendedExhaustiveTopK: 128, + largeModel: true, + description: + "Google's lightweight, fast and cost-efficient model (1m context).", + shortDescription: "Google's cost-effective model (preview).", + isLegacy: false, + supportsVision: true, + featureFlag: "google_ai_studio_experimental_models_feature", +}; + +export const GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_CONFIG: ModelConfigurationType = + { + providerId: "google_ai_studio", + modelId: GEMINI_2_FLASH_THINKING_PREVIEW_MODEL_ID, + displayName: "Gemini Flash 2.0 Thinking", + contextSize: 32_000, + recommendedTopK: 64, + recommendedExhaustiveTopK: 128, + largeModel: true, + description: + "Google's lightweight model optimized for reasoning (32k context).", + shortDescription: "Google's reasoning-focused model (preview).", + isLegacy: false, + supportsVision: true, + featureFlag: "google_ai_studio_experimental_models_feature", + }; + export const TOGETHERAI_LLAMA_3_3_70B_INSTRUCT_TURBO_MODEL_CONFIG: ModelConfigurationType = { providerId: "togetherai", diff --git a/types/src/shared/feature_flags.ts b/types/src/shared/feature_flags.ts index 6c9ffb5d82ec..e9c2d47200cf 100644 --- a/types/src/shared/feature_flags.ts +++ b/types/src/shared/feature_flags.ts @@ -12,6 +12,7 @@ export const WHITELISTABLE_FEATURES = [ "openai_o1_custom_assistants_feature", "openai_o1_high_reasoning_custom_assistants_feature", "deepseek_feature", + "google_ai_studio_experimental_models_feature", "index_private_slack_channel", "conversations_jit_actions", "disable_run_logs",