From 61f6e89d6e8512d7dfb702871662047a93e0e7cf Mon Sep 17 00:00:00 2001
From: Santthosh Selvadurai
Date: Sun, 5 May 2024 21:03:05 -0700
Subject: [PATCH] #55 Adding feature toggles and placeholders for the
 Documents tab

---
 .../migration.sql                          |  2 +
 prisma/schema.prisma                       |  1 +
 prisma/seed.ts                             | 92 ++++++++++++++++---
 src/app/api/models/[id]/route.ts           | 31 +++++++
 src/app/assistants/[id]/SideNavigation.tsx |  7 ++
 src/app/assistants/[id]/client.ts          | 21 +++++
 src/app/assistants/[id]/documents/page.tsx | 51 ++++++++++
 src/app/types/model.ts                     |  3 +
 8 files changed, 197 insertions(+), 11 deletions(-)
 create mode 100644 prisma/migrations/20240505164457_adding_feature_map_so_we_can_enable_and_disable_based_on_model_design/migration.sql
 create mode 100644 src/app/api/models/[id]/route.ts
 create mode 100644 src/app/assistants/[id]/documents/page.tsx

diff --git a/prisma/migrations/20240505164457_adding_feature_map_so_we_can_enable_and_disable_based_on_model_design/migration.sql b/prisma/migrations/20240505164457_adding_feature_map_so_we_can_enable_and_disable_based_on_model_design/migration.sql
new file mode 100644
index 0000000..c6e5437
--- /dev/null
+++ b/prisma/migrations/20240505164457_adding_feature_map_so_we_can_enable_and_disable_based_on_model_design/migration.sql
@@ -0,0 +1,2 @@
+-- AlterTable
+ALTER TABLE "Model" ADD COLUMN "features" JSONB;
diff --git a/prisma/schema.prisma b/prisma/schema.prisma
index 19d445c..5e1b804 100644
--- a/prisma/schema.prisma
+++ b/prisma/schema.prisma
@@ -25,6 +25,7 @@ model Model {
   providerId  String
   provider    ModelProvider @relation(fields: [providerId], references: [id])
   Assistant   Assistant[]
+  features    Json?
 }
 
 model Organization {
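
Because the new features column is declared as Json?, the generated Prisma client exposes it as an untyped JSON value rather than as an object with a retrieval flag, so callers have to narrow it before branching on it. A minimal sketch of one way to do that (the hasRetrieval and listRetrievalModels helpers below are illustrative, not part of this patch):

import { PrismaClient, Prisma } from '@prisma/client';

const prisma = new PrismaClient();

// Narrow the untyped Json? column down to the single flag this patch uses.
function hasRetrieval(features: Prisma.JsonValue | null): boolean {
  return (
    features !== null &&
    typeof features === 'object' &&
    !Array.isArray(features) &&
    (features as Prisma.JsonObject).retrieval === true
  );
}

// Example: list the seeded models whose feature map enables retrieval.
async function listRetrievalModels() {
  const models = await prisma.model.findMany();
  return models.filter((m) => hasRetrieval(m.features));
}
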
diff --git a/prisma/seed.ts b/prisma/seed.ts
index a1a350c..d00aba6 100644
--- a/prisma/seed.ts
+++ b/prisma/seed.ts
@@ -1,4 +1,4 @@
-import { PrismaClient } from '@prisma/client';
+import { PrismaClient, Prisma } from '@prisma/client';
 const prisma = new PrismaClient();
 
 async function main() {
@@ -29,18 +29,29 @@
   const gpt35turbo = await prisma.model.upsert({
     where: { id: 'gpt-3.5-turbo' },
-    update: {},
+    update: {
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gpt-3.5-turbo',
       name: 'GPT-3.5 Turbo',
       description: 'Currently points to gpt-3.5-turbo-0125',
       url: 'https://platform.openai.com/docs/models/gpt-3-5-turbo',
       providerId: 'openai',
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
     },
   });
 
   const gpt35turbo0125 = await prisma.model.upsert({
     where: { id: 'gpt-3.5-turbo-0125' },
-    update: {},
+    update: {
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gpt-3.5-turbo-0125',
       name: 'GPT-3.5 Turbo 0125',
@@ -48,22 +59,36 @@
       description:
         'Updated The latest GPT-3.5 Turbo model with higher accuracy at responding in requested formats.',
       url: 'https://platform.openai.com/docs/models/gpt-3-5-turbo',
       providerId: 'openai',
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
     },
   });
 
   const gpt35turbo16k = await prisma.model.upsert({
     where: { id: 'gpt-3.5-turbo-16k' },
-    update: {},
+    update: {
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gpt-3.5-turbo-16k',
       name: 'GPT-3.5 Turbo 16K',
       description: '[Legacy] Currently points to gpt-3.5-turbo-16k-0613',
       url: 'https://platform.openai.com/docs/models/gpt-3-5-turbo',
       providerId: 'openai',
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
     },
   });
 
   const gpt4 = await prisma.model.upsert({
     where: { id: 'gpt-4' },
-    update: {},
+    update: {
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gpt-4',
       name: 'GPT-4',
@@ -71,11 +96,18 @@
       description:
         'Currently points to gpt-4-0613. See continuous model upgrades.Snapshot of gpt-4 from June 13th 2023 with improved function calling support.',
       url: 'https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4',
       providerId: 'openai',
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
     },
   });
 
   const gpt4turbo = await prisma.model.upsert({
     where: { id: 'gpt-4-turbo' },
-    update: {},
+    update: {
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gpt-4-turbo',
       name: 'GPT-4 Turbo',
@@ -83,24 +115,38 @@
       description:
         'GPT-4 Turbo with Vision. The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling. Currently points to gpt-4-turbo-2024-04-09',
       url: 'https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4',
       providerId: 'openai',
+      features: {
+        retrieval: true,
+      } as Prisma.JsonObject
     },
   });
 
   const geminipro = await prisma.model.upsert({
     where: { id: 'gemini-1.5-pro-latest' },
-    update: {},
+    update: {
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gemini-1.5-pro-latest',
       name: 'Gemini Pro 1.5',
       description: 'The latest model from Google',
       url: 'https://ai.google.dev/gemini-api/docs/api-overview',
       providerId: 'google',
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
     },
   });
 
   const groq_llama3_8b = await prisma.model.upsert({
     where: { id: 'llama3-8b-8192' },
-    update: {},
+    update: {
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'llama3-8b-8192',
       name: 'LLaMA3 8b',
@@ -108,12 +154,19 @@
       description:
         'The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.',
       url: 'https://console.groq.com/docs/models#llama3-8b',
       providerId: 'groq',
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
     },
   });
 
   const groq_llama3_70b = await prisma.model.upsert({
     where: { id: 'llama3-70b-8192' },
-    update: {},
+    update: {
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'llama3-70b-8192',
       name: 'LLaMA3 70b',
@@ -121,12 +174,19 @@
       description:
         'The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. ',
       url: 'https://console.groq.com/docs/models#llama3-70b',
       providerId: 'groq',
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
     },
   });
 
   const groq_mixtral_7b = await prisma.model.upsert({
     where: { id: 'mixtral-8x7b-32768' },
-    update: {},
+    update: {
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'mixtral-8x7b-32768',
       name: 'Mistral 8x7b',
@@ -134,12 +194,19 @@
       description:
         'The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms Llama 2 70B on most benchmarks we tested.',
       url: 'https://console.groq.com/docs/models#mixtral-8x7b',
       providerId: 'groq',
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
     },
   });
 
   const groq_gemma_7b = await prisma.model.upsert({
     where: { id: 'gemma-7b-it' },
-    update: {},
+    update: {
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
+    },
     create: {
       id: 'gemma-7b-it',
       name: 'Google Gemma 7B',
@@ -147,6 +214,9 @@
       description:
         'Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.',
       url: 'https://console.groq.com/docs/models#gemma-7b',
       providerId: 'groq',
+      features: {
+        retrieval: false,
+      } as Prisma.JsonObject
     },
   });
 }
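
Every upsert above now carries the same features object in both its update and create branches, so re-running the seed refreshes the flags on rows that already exist as well as seeding new ones. A flag can also be flipped later without re-running the whole seed; a hypothetical one-off script (enableRetrieval is not part of this patch) might look like:

import { PrismaClient, Prisma } from '@prisma/client';

const prisma = new PrismaClient();

// Hypothetical one-off: toggle a single model's retrieval flag in place.
async function enableRetrieval(modelId: string) {
  await prisma.model.update({
    where: { id: modelId },
    data: { features: { retrieval: true } as Prisma.JsonObject },
  });
}

enableRetrieval('gemini-1.5-pro-latest')
  .catch(console.error)
  .finally(() => prisma.$disconnect());
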
diff --git a/src/app/api/models/[id]/route.ts b/src/app/api/models/[id]/route.ts
new file mode 100644
index 0000000..bd3a1c0
--- /dev/null
+++ b/src/app/api/models/[id]/route.ts
@@ -0,0 +1,31 @@
+import OpenAI from 'openai';
+import { PrismaClient } from '@prisma/client';
+import { getToken } from 'next-auth/jwt';
+import { NextRequest, NextResponse } from 'next/server';
+
+const prisma = new PrismaClient();
+
+const getId = (req: Request) => {
+  const url = new URL(req.url);
+  return url.pathname.split('/').splice(-1, 1)[0];
+};
+
+// Note: We should not cache the models list as it may change frequently for different organizations
+export async function GET(req: NextRequest, res: NextResponse) {
+  const token = await getToken({ req });
+  if (token) {
+    let id = getId(req);
+    let model = await prisma.model.findFirst({
+      where: {
+        id: id,
+      },
+      include: {
+        provider: true,
+      },
+    });
+    return Response.json(model, { status: 200 });
+  } else {
+    // Not Signed in
+    return Response.json({ message: 'Unauthenticated' }, { status: 401 });
+  }
+}
\ No newline at end of file
diff --git a/src/app/assistants/[id]/SideNavigation.tsx b/src/app/assistants/[id]/SideNavigation.tsx
index 71217dc..8f16d3e 100644
--- a/src/app/assistants/[id]/SideNavigation.tsx
+++ b/src/app/assistants/[id]/SideNavigation.tsx
@@ -4,6 +4,7 @@ import { Avatar, Badge, Card, Sidebar } from 'flowbite-react';
 import {
   HiColorSwatch,
   HiChatAlt2,
+  HiFolder,
   HiCog,
   HiChartBar,
   HiPuzzle,
@@ -75,6 +76,12 @@ export default function SideNavigation() {
         >
           Conversations
         </Sidebar.Item>
+        <Sidebar.Item
+          href={'/assistants/' + assistant.id + '/documents'}
+          icon={HiFolder}
+        >
+          Documents
+        </Sidebar.Item>
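
The documents page below relies on a getModel helper (the diffstat above also touches src/app/assistants/[id]/client.ts). A minimal sketch of such a helper, assuming it wraps the new GET /api/models/[id] route and resolves to a [status, body] tuple the way the page consumes it (the @/ import alias is an assumption):

import { Model } from '@/app/types/model';

// Sketch only: the helper in client.ts may differ in naming, signature, and error handling.
export async function getModel(modelId: string): Promise<[number, Model]> {
  const response = await fetch('/api/models/' + modelId, {
    method: 'GET',
    headers: { 'Content-Type': 'application/json' },
  });
  return [response.status, await response.json()];
}
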
diff --git a/src/app/assistants/[id]/documents/page.tsx b/src/app/assistants/[id]/documents/page.tsx
new file mode 100644
--- /dev/null
+++ b/src/app/assistants/[id]/documents/page.tsx
@@ -0,0 +1,51 @@
+'use client';
+
+import { useContext, useEffect, useState } from 'react';
+import { Spinner } from 'flowbite-react';
+import { AssistantContext } from '../AssistantContext';
+import { getModel } from '../client';
+import { Model } from '@/app/types/model';
+
+export default function Documents() {
+  const { assistant } = useContext(AssistantContext);
+  const [loading, setLoading] = useState(true);
+  const [model, setModel] = useState<Model | null>(null);
+
+  useEffect(() => {
+    if (assistant.modelId) {
+      getModel(assistant.modelId).then(([status, response]) => {
+        console.log(response);
+        setModel(response);
+        setLoading(false);
+      });
+    }
+  }, []);
+
+  return (
+    <div className='flex flex-col gap-4'>
+      <h3 className='text-lg font-semibold'>Documents</h3>
+      {loading ? (
+        <div className='flex items-center justify-center'>
+          <Spinner />
+        </div>
+      ) : (
+        <div>
+          {model?.features?.retrieval ? (
+            <p>
+              This model supports document retrieval, we are working on it
+            </p>
+          ) : (
+            <p>
+              Support for documents is not available for{' '}
+              {model ? model.id : 'this model'} yet.
+            </p>
+          )}
+        </div>
+      )}
+    </div>
+  );
+}
diff --git a/src/app/types/model.ts b/src/app/types/model.ts
index 77c9a72..5588e5b 100644
--- a/src/app/types/model.ts
+++ b/src/app/types/model.ts
@@ -9,4 +9,7 @@ export interface Model {
   description: string;
   url: string;
   provider: ModelProvider;
+  features?: {
+    retrieval: boolean;
+  }
 }
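
With features optional on the Model interface, other tabs can gate their UI on the same flag without repeating null checks; a small illustrative guard (supportsRetrieval is not part of the patch, and the @/ alias is again assumed):

import { Model } from '@/app/types/model';

// Defaults to false when the model or its feature map is absent.
export function supportsRetrieval(model: Model | null): boolean {
  return model?.features?.retrieval ?? false;
}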