Skip to content

Commit

Permalink
Enforce free plan at generation level (#2531)
Browse files Browse the repository at this point in the history
* Enforce free plan at generation level

* fix lint
  • Loading branch information
spolu authored Nov 14, 2023
1 parent 876cc17 commit 987d626
Show file tree
Hide file tree
Showing 2 changed files with 39 additions and 1 deletion.
19 changes: 18 additions & 1 deletion front/lib/api/assistant/generation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,11 @@ import {
getSupportedModelConfig,
GPT_4_32K_MODEL_ID,
GPT_4_MODEL_CONFIG,
isLargeModel,
} from "@app/lib/assistant";
import { Authenticator } from "@app/lib/auth";
import { CoreAPI } from "@app/lib/core_api";
import { FREE_TEST_PLAN_CODE } from "@app/lib/plans/plan_codes";
import { redisClient } from "@app/lib/redis";
import { Err, Ok, Result } from "@app/lib/result";
import logger from "@app/logger/logger";
Expand Down Expand Up @@ -311,7 +313,8 @@ export async function* runGeneration(
void
> {
const owner = auth.workspace();
if (!owner) {
const plan = auth.plan();
if (!owner || !plan) {
throw new Error("Unexpected unauthenticated call to `runGeneration`");
}

Expand All @@ -334,6 +337,20 @@ export async function* runGeneration(

let model = c.model;

if (isLargeModel(model) && plan.code === FREE_TEST_PLAN_CODE) {
yield {
type: "generation_error",
created: Date.now(),
configurationId: configuration.sId,
messageId: agentMessage.sId,
error: {
code: "free_plan_error",
message: `Free plan does not support large models. Please upgrade to a paid plan to use this model.`,
},
};
return;
}

const contextSize = getSupportedModelConfig(c.model).contextSize;

const MIN_GENERATION_TOKENS = 2048;
Expand Down
21 changes: 21 additions & 0 deletions front/lib/assistant.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ export const GPT_4_32K_MODEL_CONFIG = {
displayName: "GPT 4",
contextSize: 32768,
recommendedTopK: 32,
largeModel: true,
} as const;

export const GPT_4_MODEL_CONFIG = {
Expand All @@ -23,6 +24,7 @@ export const GPT_4_MODEL_CONFIG = {
displayName: "GPT 4",
contextSize: 8192,
recommendedTopK: 16,
largeModel: true,
};

export const GPT_4_TURBO_MODEL_CONFIG = {
Expand All @@ -31,6 +33,7 @@ export const GPT_4_TURBO_MODEL_CONFIG = {
displayName: "GPT 4",
contextSize: 128000,
recommendedTopK: 32,
largeModel: true,
} as const;

export const GPT_3_5_TURBO_16K_MODEL_CONFIG = {
Expand All @@ -39,6 +42,7 @@ export const GPT_3_5_TURBO_16K_MODEL_CONFIG = {
displayName: "GPT 3.5 Turbo",
contextSize: 16384,
recommendedTopK: 16,
largeModel: false,
} as const;

export const GPT_3_5_TURBO_MODEL_CONFIG = {
Expand All @@ -47,6 +51,7 @@ export const GPT_3_5_TURBO_MODEL_CONFIG = {
displayName: "GPT 3.5 Turbo",
contextSize: 4096,
recommendedTopK: 16,
largeModel: false,
} as const;

export const CLAUDE_DEFAULT_MODEL_CONFIG = {
Expand All @@ -55,6 +60,7 @@ export const CLAUDE_DEFAULT_MODEL_CONFIG = {
displayName: "Claude 2",
contextSize: 100000,
recommendedTopK: 32,
largeModel: true,
} as const;

export const CLAUDE_INSTANT_DEFAULT_MODEL_CONFIG = {
Expand All @@ -63,6 +69,7 @@ export const CLAUDE_INSTANT_DEFAULT_MODEL_CONFIG = {
displayName: "Claude Instant 1.2",
contextSize: 100000,
recommendedTopK: 32,
largeModel: false,
} as const;

export const MISTRAL_7B_DEFAULT_MODEL_CONFIG = {
Expand All @@ -71,6 +78,7 @@ export const MISTRAL_7B_DEFAULT_MODEL_CONFIG = {
displayName: "Mistral 7B",
contextSize: 8192,
recommendedTopK: 16,
largeModel: false,
} as const;

export const SUPPORTED_MODEL_CONFIGS = [
Expand Down Expand Up @@ -100,6 +108,19 @@ export function isSupportedModel(model: unknown): model is SupportedModel {
);
}

/**
 * Returns true when `model` matches a supported model config whose
 * `largeModel` flag is set (large models are gated behind paid plans).
 *
 * @param model - Arbitrary value; expected shape is `{ modelId, providerId }`.
 * @returns false for unsupported models, non-large models, and malformed
 *   or nullish inputs — it never throws.
 */
export function isLargeModel(model: unknown): model is SupportedModel {
  // Guard first: `model` is `unknown`, so dereferencing `.modelId` on a
  // null/undefined/primitive value would throw a TypeError at runtime.
  if (typeof model !== "object" || model === null) {
    return false;
  }
  const maybeSupportedModel = model as SupportedModel;
  const config = SUPPORTED_MODEL_CONFIGS.find(
    (m) =>
      m.modelId === maybeSupportedModel.modelId &&
      m.providerId === maybeSupportedModel.providerId
  );
  return config ? config.largeModel : false;
}

export function getSupportedModelConfig(supportedModel: SupportedModel) {
// here it is safe to cast the result to non-nullable because SupportedModel
// is derived from the const array of configs above
Expand Down

0 comments on commit 987d626

Please sign in to comment.