From 6b4b66a346213397a7c953ba08a7ff71afe84c37 Mon Sep 17 00:00:00 2001
From: Simon Vergauwen
Date: Fri, 30 Jun 2023 11:10:27 +0200
Subject: [PATCH 1/2] Add missing context functions (#212)

---
 .../functional/xef/java/auto/AIScope.java | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java b/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java
index 674cbce27..378e9a6fb 100644
--- a/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java
+++ b/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java
@@ -112,6 +112,25 @@ public CompletableFuture<List<String>> promptMessage(String prompt, LLMModel llm
     return future(continuation -> scope.promptMessage(prompt, llmModel, functions, user, echo, n, temperature, bringFromContext, minResponseTokens, continuation));
   }

+  public CompletableFuture<Void> extendContext(String[] docs) {
+    return future(continuation -> scope.extendContext(docs, continuation))
+            .thenApply(unit -> null);
+  }
+
+  public <A> CompletableFuture<A> contextScope(Function1<AIScope, CompletableFuture<A>> f) {
+    return future(continuation -> scope.contextScope((coreAIScope, continuation1) -> {
+      AIScope nestedScope = new AIScope(coreAIScope, AIScope.this);
+      return FutureKt.await(f.invoke(nestedScope), continuation1);
+    }, continuation));
+  }
+
+  public <A> CompletableFuture<A> contextScope(VectorStore store, Function1<AIScope, CompletableFuture<A>> f) {
+    return future(continuation -> scope.contextScope(store, (coreAIScope, continuation1) -> {
+      AIScope nestedScope = new AIScope(coreAIScope, AIScope.this);
+      return FutureKt.await(f.invoke(nestedScope), continuation1);
+    }, continuation));
+  }
+
   public <A> CompletableFuture<A> contextScope(List<String> docs, Function1<AIScope, CompletableFuture<A>> f) {
     return future(continuation -> scope.contextScopeWithDocs(docs, (coreAIScope, continuation1) -> {
       AIScope nestedScope = new AIScope(coreAIScope, AIScope.this);

From c0af078c0293d3f4e8d233571bdd026142cad0a4 Mon Sep 17 00:00:00 2001
From: Raúl Raja Martínez
Date: Fri, 30 Jun 2023 12:36:53 +0200
Subject: [PATCH 2/2] Generic AI client and models + open-ai client lib (#196)

* Generic AI client and models with open-ai client impl from https://github.com/aallam/openai-kotlin
* type LLM models based on their capabilities and type the operations
* add token as parameter to `openAI` fn falling back to env variable
* add config as optional parameter
* remove old config
* adapt to latest changes from main and new java module
* have openai be its own module that depends on xef-core. kotlin, scala and java depend on openai module for defaults.
xef core does not depend on open ai * fix bug in scala fn name for serialization * make AIClient : AutoCloseable * Rename enum cases * Rename to TEXT_EMBEDDING_ADA_002 * Fix AIClient close expectation * fix for overriding user provided service * remove star import --------- Co-authored-by: Simon Vergauwen --- core/build.gradle.kts | 2 - .../com/xebia/functional/xef/AIError.kt | 2 +- .../com/xebia/functional/xef/auto/AI.kt | 82 +----- .../xebia/functional/xef/auto/AIRuntime.kt | 36 +-- .../xebia/functional/xef/auto/CoreAIScope.kt | 49 ++-- .../com/xebia/functional/xef/auto/Solution.kt | 31 -- .../functional/xef/embeddings/Embeddings.kt | 2 +- .../com/xebia/functional/xef/env/config.kt | 21 -- .../com/xebia/functional/xef/llm/AIClient.kt | 28 ++ .../com/xebia/functional/xef/llm/LLMModel.kt | 65 +++++ .../xef/llm/huggingface/HuggingFaceClient.kt | 39 --- .../functional/xef/llm/huggingface/models.kt | 10 - .../llm/models/chat/ChatCompletionRequest.kt | 16 ++ .../ChatCompletionRequestWithFunctions.kt | 20 ++ .../llm/models/chat/ChatCompletionResponse.kt | 12 + .../ChatCompletionResponseWithFunctions.kt | 12 + .../functional/xef/llm/models/chat/Choice.kt | 3 + .../llm/models/chat/ChoiceWithFunctions.kt | 7 + .../functional/xef/llm/models/chat/Message.kt | 3 + .../models/chat/MessageWithFunctionCall.kt | 10 + .../functional/xef/llm/models/chat/Role.kt | 8 + .../xef/llm/models/embeddings/Embedding.kt | 3 + .../llm/models/embeddings/EmbeddingModel.kt | 5 + .../llm/models/embeddings/EmbeddingRequest.kt | 3 + .../llm/models/embeddings/EmbeddingResult.kt | 5 + .../llm/models/embeddings/RequestConfig.kt | 9 + .../xef/llm/models/functions/CFunction.kt | 3 + .../xef/llm/models/functions/FunctionCall.kt | 3 + .../llm/models/images/ImageGenerationUrl.kt | 3 + .../models/images/ImagesGenerationRequest.kt | 9 + .../models/images/ImagesGenerationResponse.kt | 3 + .../xef/llm/models/text/CompletionChoice.kt | 8 + .../xef/llm/models/text/CompletionRequest.kt | 20 ++ .../xef/llm/models/text/CompletionResult.kt | 12 + .../functional/xef/llm/models/usage/Usage.kt | 7 + .../functional/xef/llm/openai/AIClient.kt | 18 -- .../xef/llm/openai/KtorOpenAIClient.kt | 133 --------- .../functional/xef/llm/openai/MockAIClient.kt | 68 ----- .../llm/openai/images/ImageGenerationUrl.kt | 5 - .../openai/images/ImagesGenerationRequest.kt | 13 - .../openai/images/ImagesGenerationResponse.kt | 6 - .../xebia/functional/xef/llm/openai/models.kt | 265 ------------------ .../xef/vectorstores/LocalVectorStore.kt | 6 +- examples/kotlin/build.gradle.kts | 1 + .../functional/xef/auto/CustomRuntime.kt | 25 +- .../xebia/functional/xef/auto/Population.kt | 5 +- .../functional/xef/auto/tot/ControlSignal.kt | 1 - .../xebia/functional/xef/auto/tot/Critique.kt | 1 - .../com/xebia/functional/xef/auto/tot/Main.kt | 2 +- .../functional/xef/auto/tot/Rendering.kt | 9 - .../xebia/functional/xef/auto/tot/Solution.kt | 1 - gradle/libs.versions.toml | 2 + .../functional/xef/vectorstores/Lucene.kt | 22 +- .../xef/vectorstores/PostgreSQLVectorStore.kt | 4 +- .../src/test/kotlin/xef/PGVectorStoreSpec.kt | 6 +- java/build.gradle.kts | 3 +- .../functional/xef/java/auto/AIScope.java | 47 ++-- kotlin/build.gradle.kts | 3 +- .../com/xebia/functional/xef/auto/AIScope.kt | 40 +++ .../xef/auto/DeserializerLLMAgent.kt | 12 +- .../xef/auto/ImageGenerationAgent.kt | 2 +- openai/build.gradle.kts | 183 ++++++++++++ .../xef/auto/llm/openai/MockAIClient.kt | 124 ++++++++ .../xef/auto/llm/openai/OpenAIClient.kt | 258 +++++++++++++++++ 
.../xef/auto/llm/openai}/OpenAIEmbeddings.kt | 16 +- .../xef/auto/llm/openai/OpenAIRuntime.kt | 47 ++++ .../functional/xef/scala/auto/package.scala | 21 +- settings.gradle.kts | 3 + 68 files changed, 1073 insertions(+), 830 deletions(-) delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/auto/Solution.kt delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/env/config.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/AIClient.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/LLMModel.kt delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/HuggingFaceClient.kt delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/models.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequest.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequestWithFunctions.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponse.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponseWithFunctions.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Choice.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChoiceWithFunctions.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/MessageWithFunctionCall.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Role.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/Embedding.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingModel.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingRequest.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingResult.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/RequestConfig.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/CFunction.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/FunctionCall.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImageGenerationUrl.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationRequest.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationResponse.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionChoice.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionRequest.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionResult.kt create mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/usage/Usage.kt delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/AIClient.kt delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/KtorOpenAIClient.kt delete mode 100644 
core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/MockAIClient.kt
 delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImageGenerationUrl.kt
 delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationRequest.kt
 delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationResponse.kt
 delete mode 100644 core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/models.kt
 create mode 100644 openai/build.gradle.kts
 create mode 100644 openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/MockAIClient.kt
 create mode 100644 openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt
 rename {core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings => openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai}/OpenAIEmbeddings.kt (61%)
 create mode 100644 openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIRuntime.kt

diff --git a/core/build.gradle.kts b/core/build.gradle.kts
index b45cfc2b6..87b0f9d42 100644
--- a/core/build.gradle.kts
+++ b/core/build.gradle.kts
@@ -68,8 +68,6 @@ kotlin {
         api(libs.bundles.ktor.client)
         api(projects.xefTokenizer)

-        // implementation(libs.arrow.fx.stm)
-
         implementation(libs.uuid)
         implementation(libs.klogging)
       }

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/AIError.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/AIError.kt
index 75cd12704..771e6d53a 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/AIError.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/AIError.kt
@@ -1,7 +1,7 @@
 package com.xebia.functional.xef

 import arrow.core.NonEmptyList
-import com.xebia.functional.xef.llm.openai.Message
+import com.xebia.functional.xef.llm.models.chat.Message
 import kotlin.jvm.JvmOverloads

 sealed class AIError @JvmOverloads constructor(message: String, cause: Throwable? = null) :

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AI.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AI.kt
index de503cc20..8afe0c330 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AI.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AI.kt
@@ -1,18 +1,8 @@
 package com.xebia.functional.xef.auto

-import arrow.core.Either
-import arrow.core.left
-import arrow.core.right
 import com.xebia.functional.xef.AIError
-import com.xebia.functional.xef.embeddings.OpenAIEmbeddings
-import com.xebia.functional.xef.env.OpenAIConfig
-import com.xebia.functional.xef.llm.openai.AIClient
-import com.xebia.functional.xef.llm.openai.LLMModel
-import com.xebia.functional.xef.llm.openai.MockOpenAIClient
-import com.xebia.functional.xef.llm.openai.simpleMockAIClient
-import com.xebia.functional.xef.vectorstores.LocalVectorStore
+import com.xebia.functional.xef.llm.AIClient
 import com.xebia.functional.xef.vectorstores.VectorStore
-import kotlin.time.ExperimentalTime

 @DslMarker annotation class AiDsl

@@ -29,79 +19,9 @@ typealias AI<A> = suspend CoreAIScope.() -> A

 /** A DSL block that makes it more convenient to construct [AI] values. */
 inline fun <A> ai(noinline block: suspend CoreAIScope.() -> A): AI<A> = block

-/**
- * Run the [AI] value to produce an [A], this method initialises all the dependencies required to
- * run the [AI] value and once it finishes it closes all the resources.
- *
- * This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions.
- */
-suspend inline fun <A> AI<A>.getOrElse(
-  runtime: AIRuntime<A> = AIRuntime.openAI(),
-  crossinline orElse: suspend (AIError) -> A
-): A = AIScope(runtime, this) { orElse(it) }
-
 suspend fun <A> AIScope(runtime: AIRuntime<A>, block: AI<A>, orElse: suspend (AIError) -> A): A =
   try {
     runtime.runtime(block)
   } catch (e: AIError) {
     orElse(e)
   }
-
-@OptIn(ExperimentalTime::class)
-suspend fun <A> MockAIScope(
-  mockClient: MockOpenAIClient,
-  block: suspend CoreAIScope.() -> A,
-  orElse: suspend (AIError) -> A
-): A =
-  try {
-    val embeddings = OpenAIEmbeddings(OpenAIConfig(), mockClient)
-    val vectorStore = LocalVectorStore(embeddings)
-    val scope =
-      CoreAIScope(
-        LLMModel.GPT_3_5_TURBO,
-        LLMModel.GPT_3_5_TURBO_FUNCTIONS,
-        mockClient,
-        vectorStore,
-        embeddings
-      )
-    block(scope)
-  } catch (e: AIError) {
-    orElse(e)
-  }
-
-/**
- * Run the [AI] value to produce _either_ an [AIError], or [A]. this method initialises all the
- * dependencies required to run the [AI] value and once it finishes it closes all the resources.
- *
- * This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions.
- *
- * @see getOrElse for an operator that allow directly handling the [AIError] case.
- */
-suspend inline fun <A> AI<A>.toEither(): Either<AIError, A> =
-  ai { invoke().right() }.getOrElse { it.left() }
-
-/**
- * Run the [AI] value to produce _either_ an [AIError], or [A]. This method uses the [mockAI] to
- * compute the different responses.
- */
-suspend fun <A> AI<A>.mock(mockAI: MockOpenAIClient): Either<AIError, A> =
-  MockAIScope(mockAI, { invoke().right() }, { it.left() })
-
-/**
- * Run the [AI] value to produce _either_ an [AIError], or [A]. This method uses the [mockAI] to
- * compute the different responses.
- */
-suspend fun <A> AI<A>.mock(mockAI: (String) -> String): Either<AIError, A> =
-  MockAIScope(simpleMockAIClient(mockAI), { invoke().right() }, { it.left() })
-
-/**
- * Run the [AI] value to produce [A]. this method initialises all the dependencies required to run
- * the [AI] value and once it finishes it closes all the resources.
- *
- * This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions.
- *
- * @throws AIError in case something went wrong.
- * @see getOrElse for an operator that allow directly handling the [AIError] case instead of
- *   throwing.
- */
-suspend inline fun <A> AI<A>.getOrThrow(): A = getOrElse { throw it }
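After this hunk the only core entry points left in AI.kt are the `ai` builder and the runtime-parameterised `AIScope` function. A minimal usage sketch under that assumption — any `AIRuntime<String>` value works here; the OpenAI-backed default now lives in the new openai module per the diffstat:

import com.xebia.functional.xef.AIError
import com.xebia.functional.xef.auto.AI
import com.xebia.functional.xef.auto.AIRuntime
import com.xebia.functional.xef.auto.AIScope
import com.xebia.functional.xef.auto.ai

// Build the program with the DSL, then run it with whichever runtime the
// caller supplies, recovering from AIError instead of throwing.
suspend fun greeting(runtime: AIRuntime<String>): String {
  val program: AI<String> = ai { promptMessage("Say hello").firstOrNull() ?: "" }
  return AIScope(runtime, program) { e: AIError -> "failed: ${e.message}" }
}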
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIRuntime.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIRuntime.kt
index 2f90a5589..312c8531e 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIRuntime.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIRuntime.kt
@@ -1,32 +1,12 @@
 package com.xebia.functional.xef.auto

-import com.xebia.functional.xef.embeddings.OpenAIEmbeddings
-import com.xebia.functional.xef.env.OpenAIConfig
-import com.xebia.functional.xef.llm.openai.KtorOpenAIClient
-import com.xebia.functional.xef.llm.openai.LLMModel
-import com.xebia.functional.xef.vectorstores.LocalVectorStore
-import kotlin.jvm.JvmStatic
-import kotlin.time.ExperimentalTime
+import com.xebia.functional.xef.embeddings.Embeddings
+import com.xebia.functional.xef.llm.AIClient

-data class AIRuntime<A>(val runtime: suspend (block: AI<A>) -> A) {
-  companion object {
-    @OptIn(ExperimentalTime::class)
-    @JvmStatic
-    fun <A> openAI(): AIRuntime<A> = AIRuntime { block ->
-      val openAIConfig = OpenAIConfig()
-      KtorOpenAIClient(openAIConfig).use { openAiClient ->
-        val embeddings = OpenAIEmbeddings(openAIConfig, openAiClient)
-        val vectorStore = LocalVectorStore(embeddings)
-        val scope =
-          CoreAIScope(
-            defaultModel = LLMModel.GPT_3_5_TURBO_16K,
-            defaultSerializationModel = LLMModel.GPT_3_5_TURBO_FUNCTIONS,
-            aiClient = openAiClient,
-            context = vectorStore,
-            embeddings = embeddings
-          )
-        block(scope)
-      }
-    }
-  }
+data class AIRuntime<A>(
+  val client: AIClient,
+  val embeddings: Embeddings,
+  val runtime: suspend (block: AI<A>) -> A
+) {
+  companion object
 }
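AIRuntime is now just data: a client, an embeddings implementation, and the interpreter function. A sketch of wiring a custom runtime out of any AIClient/Embeddings pair, mirroring what the deleted openAI() factory used to hard-code (the model choices are illustrative):

import com.xebia.functional.xef.auto.AIRuntime
import com.xebia.functional.xef.auto.CoreAIScope
import com.xebia.functional.xef.embeddings.Embeddings
import com.xebia.functional.xef.llm.AIClient
import com.xebia.functional.xef.llm.LLMModel
import com.xebia.functional.xef.vectorstores.LocalVectorStore

fun <A> customRuntime(client: AIClient, embeddings: Embeddings): AIRuntime<A> =
  AIRuntime(client, embeddings) { block ->
    try {
      // Same wiring the old factory did inline: a local vector store over the
      // given embeddings plus default chat and serialization models.
      val scope =
        CoreAIScope(
          defaultModel = LLMModel.GPT_3_5_TURBO_16K,
          defaultSerializationModel = LLMModel.GPT_3_5_TURBO_FUNCTIONS,
          aiClient = client,
          context = LocalVectorStore(embeddings),
          embeddings = embeddings
        )
      block(scope)
    } finally {
      client.close() // AIClient is AutoCloseable after this patch
    }
  }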
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/CoreAIScope.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/CoreAIScope.kt
index 8e5a24f76..e22ba22d0 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/CoreAIScope.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/CoreAIScope.kt
@@ -7,10 +7,17 @@
 import com.xebia.functional.tokenizer.ModelType
 import com.xebia.functional.tokenizer.truncateText
 import com.xebia.functional.xef.AIError
 import com.xebia.functional.xef.embeddings.Embeddings
-import com.xebia.functional.xef.llm.openai.*
-import com.xebia.functional.xef.llm.openai.CFunction
-import com.xebia.functional.xef.llm.openai.images.ImagesGenerationRequest
-import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse
+import com.xebia.functional.xef.llm.AIClient
+import com.xebia.functional.xef.llm.LLM
+import com.xebia.functional.xef.llm.LLMModel
+import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequest
+import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequestWithFunctions
+import com.xebia.functional.xef.llm.models.chat.Message
+import com.xebia.functional.xef.llm.models.chat.Role
+import com.xebia.functional.xef.llm.models.functions.CFunction
+import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest
+import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse
+import com.xebia.functional.xef.llm.models.text.CompletionRequest
 import com.xebia.functional.xef.prompt.Prompt
 import com.xebia.functional.xef.vectorstores.CombinedVectorStore
 import com.xebia.functional.xef.vectorstores.LocalVectorStore
@@ -25,8 +32,8 @@ import kotlin.jvm.JvmName
 * programs.
 */
class CoreAIScope(
-  val defaultModel: LLMModel,
-  val defaultSerializationModel: LLMModel,
+  val defaultModel: LLM.Chat,
+  val defaultSerializationModel: LLM.ChatWithFunctions,
   val aiClient: AIClient,
   val context: VectorStore,
   val embeddings: Embeddings,
@@ -117,7 +124,7 @@ class CoreAIScope(
     functions: List<CFunction>,
     serializer: (json: String) -> A,
     maxDeserializationAttempts: Int = this.maxDeserializationAttempts,
-    model: LLMModel = defaultSerializationModel,
+    model: LLM.ChatWithFunctions = defaultSerializationModel,
     user: String = this.user,
     echo: Boolean = this.echo,
     numberOfPredictions: Int = this.numberOfPredictions,
@@ -162,7 +169,7 @@ class CoreAIScope(
   @AiDsl
   suspend fun promptMessage(
     question: String,
-    model: LLMModel = defaultModel,
+    model: LLM.Chat = defaultModel,
     functions: List<CFunction> = emptyList(),
     user: String = this.user,
     echo: Boolean = this.echo,
@@ -186,7 +193,7 @@ class CoreAIScope(
   @AiDsl
   suspend fun promptMessage(
     prompt: Prompt,
-    model: LLMModel = defaultModel,
+    model: LLM.Chat = defaultModel,
     functions: List<CFunction> = emptyList(),
     user: String = this.user,
     echo: Boolean = this.echo,
@@ -249,7 +256,7 @@ class CoreAIScope(
     }

     suspend fun buildChatRequest(): ChatCompletionRequest {
-      val messages: List<Message> = listOf(Message(Role.system.name, promptWithContext))
+      val messages: List<Message> = listOf(Message(Role.SYSTEM.name, promptWithContext))
       return ChatCompletionRequest(
         model = model.name,
         user = user,
@@ -261,7 +268,7 @@ class CoreAIScope(
     }

     suspend fun chatWithFunctionsRequest(): ChatCompletionRequestWithFunctions {
-      val role: String = Role.user.name
+      val role: String = Role.USER.name
       val firstFnName: String? = functions.firstOrNull()?.name
       val messages: List<Message> = listOf(Message(role, promptWithContext))
       return ChatCompletionRequestWithFunctions(
@@ -276,15 +283,15 @@ class CoreAIScope(
       )
     }

-    return when (model.kind) {
-      LLMModel.Kind.Completion ->
-        aiClient.createCompletion(buildCompletionRequest()).choices.map { it.text }
-      LLMModel.Kind.Chat ->
-        aiClient.createChatCompletion(buildChatRequest()).choices.map { it.message.content }
-      LLMModel.Kind.ChatWithFunctions ->
-        aiClient.createChatCompletionWithFunctions(chatWithFunctionsRequest()).choices.map {
-          it.message.functionCall.arguments
+    return when (model) {
+      is LLM.Completion ->
+        aiClient.createCompletion(buildCompletionRequest()).choices.map { it.text }
+      is LLM.ChatWithFunctions ->
+        aiClient.createChatCompletionWithFunctions(chatWithFunctionsRequest()).choices.mapNotNull {
+          it.message?.functionCall?.arguments
         }
+      else ->
+        aiClient.createChatCompletion(buildChatRequest()).choices.mapNotNull { it.message?.content }
     }
   }

@@ -323,16 +330,16 @@ class CoreAIScope(
     } else prompt
   }

-  private fun tokensFromMessages(messages: List<Message>, model: LLMModel): Int {
+  private fun tokensFromMessages(messages: List<Message>, model: LLM): Int {
     fun Encoding.countTokensFromMessages(tokensPerMessage: Int, tokensPerName: Int): Int =
       messages.sumOf { message ->
         countTokens(message.role) +
-          countTokens(message.content) +
+          (message.content?.let { countTokens(it) } ?: 0) +
           tokensPerMessage +
           (message.name?.let { tokensPerName } ?: 0)
       } + 3

-    fun fallBackTo(fallbackModel: LLMModel, paddingTokens: Int): Int {
+    fun fallBackTo(fallbackModel: LLM, paddingTokens: Int): Int {
       logger.debug {
         "Warning: ${model.name} may change over time. " +
           "Returning messages num tokens assuming ${fallbackModel.name} + $paddingTokens padding tokens."
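The old LLMModel.Kind enum dispatch is replaced above by matching on the model's capability interfaces, and the check order matters: LLM.ChatWithFunctions extends LLM.Chat, so the more specific capability has to be tested first. A small illustrative sketch of that pattern (the function and its strings are hypothetical):

import com.xebia.functional.xef.llm.LLM
import com.xebia.functional.xef.llm.LLMModel

fun capability(model: LLM): String =
  when (model) {
    // ChatWithFunctions is itself a Chat, so match it before plain Chat
    is LLM.ChatWithFunctions -> "${model.name}: chat + function calling"
    is LLM.Completion -> "${model.name}: plain text completion"
    is LLM.Chat -> "${model.name}: chat"
    else -> "${model.name}: other capability (embeddings, images)"
  }

fun main() {
  println(capability(LLMModel.GPT_3_5_TURBO_FUNCTIONS)) // chat + function calling
  println(capability(LLMModel.TEXT_DAVINCI_003))        // plain text completion
  println(capability(LLMModel.GPT_3_5_TURBO))           // chat
}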
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/Solution.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/Solution.kt
deleted file mode 100644
index 43a9d845b..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/auto/Solution.kt
+++ /dev/null
@@ -1,31 +0,0 @@
-package com.xebia.functional.xef.auto
-
-import kotlinx.serialization.Serializable
-
-@Serializable
-data class Solution(val objective: String, val result: String, val objectiveAccomplished: Boolean) {
-  val prompt =
-    """
-    |Objective: $objective
-    |Result: $result
-    |Accomplishes objective: $objectiveAccomplished
-    """
-      .trimMargin()
-}
-
-@Serializable
-data class AdditionalTasks(val objective: String, val tasks: List<String>) {
-  val prompt =
-    """
-    |Objective: $objective
-    |Tasks: ${tasks.joinToString("\n")}
-    """
-      .trimMargin()
-}
-
-@Serializable
-data class Reassurance(
-  val objective: String,
-  val objectiveAccomplished: Boolean,
-  val tasksWouldHelpAccomplishObjective: Map<String, Boolean>
-)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/Embeddings.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/Embeddings.kt
index 1f378d458..7f72b75d7 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/Embeddings.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/Embeddings.kt
@@ -1,6 +1,6 @@
 package com.xebia.functional.xef.embeddings

-import com.xebia.functional.xef.llm.openai.RequestConfig
+import com.xebia.functional.xef.llm.models.embeddings.RequestConfig

 data class Embedding(val data: List<Float>)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/env/config.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/env/config.kt
deleted file mode 100644
index 085747411..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/env/config.kt
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.xebia.functional.xef.env
-
-import kotlin.jvm.JvmOverloads
-
-data class OpenAIConfig
-@JvmOverloads
-constructor(
-  val token: String =
-    requireNotNull(getenv("OPENAI_TOKEN")) { "OpenAI Token missing from environment." },
-  val baseUrl: String = "https://api.openai.com/v1/",
-  val chunkSize: Int = 300,
-  val requestTimeoutMillis: Long = 30_000
-)
-
-data class HuggingFaceConfig
-@JvmOverloads
-constructor(
-  val token: String =
-    requireNotNull(getenv("OPENAI_TOKEN")) { "OpenAI Token missing from environment."
}, - val baseUrl: String = "https://api-inference.huggingface.co/" -) diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/AIClient.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/AIClient.kt new file mode 100644 index 000000000..4cabb75d0 --- /dev/null +++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/AIClient.kt @@ -0,0 +1,28 @@ +package com.xebia.functional.xef.llm + +import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequest +import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequestWithFunctions +import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponse +import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponseWithFunctions +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingResult +import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest +import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse +import com.xebia.functional.xef.llm.models.text.CompletionRequest +import com.xebia.functional.xef.llm.models.text.CompletionResult + +interface AIClient : AutoCloseable { + suspend fun createCompletion(request: CompletionRequest): CompletionResult + + suspend fun createChatCompletion(request: ChatCompletionRequest): ChatCompletionResponse + + suspend fun createChatCompletionWithFunctions( + request: ChatCompletionRequestWithFunctions + ): ChatCompletionResponseWithFunctions + + suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult + + suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse + + override fun close() {} +} diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/LLMModel.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/LLMModel.kt new file mode 100644 index 000000000..8a19464c6 --- /dev/null +++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/LLMModel.kt @@ -0,0 +1,65 @@ +package com.xebia.functional.xef.llm + +import com.xebia.functional.tokenizer.ModelType +import kotlin.jvm.JvmStatic + +sealed interface LLM { + val name: String + val modelType: ModelType + + interface Chat : LLM + + interface Completion : LLM + + interface ChatWithFunctions : Chat + + interface Embedding : LLM + + interface Images : LLM { + suspend fun createImage() + } +} + +sealed class LLMModel(override val name: String, override val modelType: ModelType) : LLM { + + data class Chat(override val name: String, override val modelType: ModelType) : + LLMModel(name, modelType), LLM.Chat + + data class Completion(override val name: String, override val modelType: ModelType) : + LLMModel(name, modelType), LLM.Completion + + data class ChatWithFunctions(override val name: String, override val modelType: ModelType) : + LLMModel(name, modelType), LLM.ChatWithFunctions + + data class Embedding(override val name: String, override val modelType: ModelType) : + LLMModel(name, modelType), LLM.Embedding + + companion object { + @JvmStatic val GPT_4 = Chat("gpt-4", ModelType.GPT_4) + + @JvmStatic val GPT_4_0314 = Chat("gpt-4-0314", ModelType.GPT_4) + + @JvmStatic val GPT_4_32K = Chat("gpt-4-32k", ModelType.GPT_4_32K) + + @JvmStatic val GPT_3_5_TURBO = Chat("gpt-3.5-turbo", ModelType.GPT_3_5_TURBO) + + @JvmStatic val GPT_3_5_TURBO_16K = Chat("gpt-3.5-turbo-16k", ModelType.GPT_3_5_TURBO_16_K) + + @JvmStatic + val GPT_3_5_TURBO_FUNCTIONS = + ChatWithFunctions("gpt-3.5-turbo-0613", ModelType.GPT_3_5_TURBO_FUNCTIONS) + + @JvmStatic val 
GPT_3_5_TURBO_0301 = Chat("gpt-3.5-turbo-0301", ModelType.GPT_3_5_TURBO)
+
+    @JvmStatic val TEXT_DAVINCI_003 = Completion("text-davinci-003", ModelType.TEXT_DAVINCI_003)
+
+    @JvmStatic val TEXT_DAVINCI_002 = Completion("text-davinci-002", ModelType.TEXT_DAVINCI_002)
+
+    @JvmStatic
+    val TEXT_CURIE_001 = Completion("text-curie-001", ModelType.TEXT_SIMILARITY_CURIE_001)
+
+    @JvmStatic val TEXT_BABBAGE_001 = Completion("text-babbage-001", ModelType.TEXT_BABBAGE_001)
+
+    @JvmStatic val TEXT_ADA_001 = Completion("text-ada-001", ModelType.TEXT_ADA_001)
+  }
+}

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/HuggingFaceClient.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/HuggingFaceClient.kt
deleted file mode 100644
index 609cc9763..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/HuggingFaceClient.kt
+++ /dev/null
@@ -1,39 +0,0 @@
-package com.xebia.functional.xef.llm.huggingface
-
-import com.xebia.functional.xef.configure
-import com.xebia.functional.xef.env.HuggingFaceConfig
-import io.ktor.client.HttpClient
-import io.ktor.client.call.body
-import io.ktor.client.plugins.HttpTimeout
-import io.ktor.client.plugins.contentnegotiation.ContentNegotiation
-import io.ktor.client.plugins.defaultRequest
-import io.ktor.client.request.post
-import io.ktor.http.path
-import io.ktor.serialization.kotlinx.json.json
-
-interface HuggingFaceClient {
-  suspend fun generate(request: InferenceRequest, model: Model): List<Generation>
-}
-
-class KtorHuggingFaceClient(private val config: HuggingFaceConfig) :
-  HuggingFaceClient, AutoCloseable {
-
-  private val httpClient: HttpClient = HttpClient {
-    install(HttpTimeout)
-    install(ContentNegotiation) { json() }
-    defaultRequest { url(config.baseUrl) }
-  }
-
-  override suspend fun generate(request: InferenceRequest, model: Model): List<Generation> {
-    val response =
-      httpClient.post {
-        url { path("models", model.name) }
-        configure(config.token, request)
-      }
-    return response.body()
-  }
-
-  override fun close() {
-    httpClient.close()
-  }
-}

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/models.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/models.kt
deleted file mode 100644
index e21ea72c7..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/huggingface/models.kt
+++ /dev/null
@@ -1,10 +0,0 @@
-package com.xebia.functional.xef.llm.huggingface
-
-import kotlin.jvm.JvmInline
-import kotlinx.serialization.Serializable
-
-@Serializable data class Generation(val generatedText: String)
-
-@Serializable data class InferenceRequest(val inputs: String, val maxLength: Int = 1000)
-
-@Serializable @JvmInline value class Model(val name: String)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequest.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequest.kt
new file mode 100644
index 000000000..4200f6331
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequest.kt
@@ -0,0 +1,16 @@
+package com.xebia.functional.xef.llm.models.chat
+
+data class ChatCompletionRequest(
+  val model: String,
+  val messages: List<Message>,
+  val temperature: Double = 0.0,
+  val topP: Double = 1.0,
+  val n: Int = 1,
+  val stream: Boolean = false,
+  val stop: List<String>? = null,
+  val maxTokens: Int? = null,
+  val presencePenalty: Double = 0.0,
+  val frequencyPenalty: Double = 0.0,
+  val logitBias: Map<String, Int> = emptyMap(),
+  val user: String?
+)
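These request models are plain multiplatform data classes with no kotlinx-serialization annotations; mapping to a concrete wire format (snake_case names and all) is now the client module's job. A hand-built request under the defaults above — `user` must be passed because it has no default value (the values themselves are illustrative):

import com.xebia.functional.xef.llm.LLMModel
import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequest
import com.xebia.functional.xef.llm.models.chat.Message
import com.xebia.functional.xef.llm.models.chat.Role

val chatRequest: ChatCompletionRequest =
  ChatCompletionRequest(
    model = LLMModel.GPT_3_5_TURBO.name,
    messages = listOf(Message(Role.SYSTEM.name, "You are a terse assistant.")),
    temperature = 0.7,
    maxTokens = 256,
    user = "example-user" // hypothetical user id
  )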
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequestWithFunctions.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequestWithFunctions.kt
new file mode 100644
index 000000000..80245d4cd
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionRequestWithFunctions.kt
@@ -0,0 +1,20 @@
+package com.xebia.functional.xef.llm.models.chat
+
+import com.xebia.functional.xef.llm.models.functions.CFunction
+
+data class ChatCompletionRequestWithFunctions(
+  val model: String,
+  val messages: List<Message>,
+  val functions: List<CFunction> = emptyList(),
+  val temperature: Double = 0.0,
+  val topP: Double = 1.0,
+  val n: Int = 1,
+  val stream: Boolean = false,
+  val stop: List<String>? = null,
+  val maxTokens: Int? = null,
+  val presencePenalty: Double = 0.0,
+  val frequencyPenalty: Double = 0.0,
+  val logitBias: Map<String, Int> = emptyMap(),
+  val user: String?,
+  val functionCall: Map<String, String>,
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponse.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponse.kt
new file mode 100644
index 000000000..7a4f2580e
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponse.kt
@@ -0,0 +1,12 @@
+package com.xebia.functional.xef.llm.models.chat
+
+import com.xebia.functional.xef.llm.models.usage.Usage
+
+data class ChatCompletionResponse(
+  val id: String,
+  val `object`: String,
+  val created: Int,
+  val model: String,
+  val usage: Usage,
+  val choices: List<Choice>
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponseWithFunctions.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponseWithFunctions.kt
new file mode 100644
index 000000000..8aa4d14ac
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChatCompletionResponseWithFunctions.kt
@@ -0,0 +1,12 @@
+package com.xebia.functional.xef.llm.models.chat
+
+import com.xebia.functional.xef.llm.models.usage.Usage
+
+data class ChatCompletionResponseWithFunctions(
+  val id: String,
+  val `object`: String,
+  val created: Int,
+  val model: String,
+  val usage: Usage,
+  val choices: List<ChoiceWithFunctions>
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Choice.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Choice.kt
new file mode 100644
index 000000000..d75109491
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Choice.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.chat
+
+data class Choice(val message: Message?, val finishReason: String?, val index: Int?)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChoiceWithFunctions.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChoiceWithFunctions.kt
new file mode 100644
index 000000000..0828db977
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/ChoiceWithFunctions.kt
@@ -0,0 +1,7 @@
+package com.xebia.functional.xef.llm.models.chat
+
+data class ChoiceWithFunctions(
+  val message: MessageWithFunctionCall?,
+  val finishReason: String?,
+  val index: Int?
+)
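Unlike the deleted serializable models, `message`, `content`, and `functionCall` are all nullable here, so consumers filter rather than trust the payload — the same pattern CoreAIScope now uses. A minimal sketch:

import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponseWithFunctions

// Collect the function-call arguments of every choice that actually carries one.
fun functionArguments(response: ChatCompletionResponseWithFunctions): List<String> =
  response.choices.mapNotNull { it.message?.functionCall?.arguments }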
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt
new file mode 100644
index 000000000..30fc0e8f5
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.chat
+
+data class Message(val role: String, val content: String?, val name: String? = Role.ASSISTANT.name)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/MessageWithFunctionCall.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/MessageWithFunctionCall.kt
new file mode 100644
index 000000000..eeeb6a3ef
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/MessageWithFunctionCall.kt
@@ -0,0 +1,10 @@
+package com.xebia.functional.xef.llm.models.chat
+
+import com.xebia.functional.xef.llm.models.functions.FunctionCall
+
+data class MessageWithFunctionCall(
+  val role: String,
+  val content: String? = null,
+  val functionCall: FunctionCall?,
+  val name: String? = Role.ASSISTANT.name
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Role.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Role.kt
new file mode 100644
index 000000000..7eb3fa41b
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Role.kt
@@ -0,0 +1,8 @@
+package com.xebia.functional.xef.llm.models.chat
+
+enum class Role {
+  SYSTEM,
+  USER,
+  ASSISTANT,
+  FUNCTION
+}

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/Embedding.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/Embedding.kt
new file mode 100644
index 000000000..b2d355d6a
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/Embedding.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.embeddings
+
+class Embedding(val `object`: String, val embedding: List<Float>, val index: Int)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingModel.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingModel.kt
new file mode 100644
index 000000000..acfc85cc6
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingModel.kt
@@ -0,0 +1,5 @@
+package com.xebia.functional.xef.llm.models.embeddings
+
+enum class EmbeddingModel(val modelName: String) {
+  TEXT_EMBEDDING_ADA_002("text-embedding-ada-002")
+}

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingRequest.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingRequest.kt
new file mode 100644
index 000000000..a76d5ee7c
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingRequest.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.embeddings
+
+data class EmbeddingRequest(val model: String, val input: List<String>, val user: String)
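A small sketch of describing an embeddings call with these models — note the enum case rename to TEXT_EMBEDDING_ADA_002 mentioned in the commit message (input text and user id are illustrative):

import com.xebia.functional.xef.llm.models.embeddings.EmbeddingModel
import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest

val embeddingRequest: EmbeddingRequest =
  EmbeddingRequest(
    model = EmbeddingModel.TEXT_EMBEDDING_ADA_002.modelName,
    input = listOf("xef-core no longer ships its own HTTP client"),
    user = "example-user"
  )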
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingResult.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingResult.kt
new file mode 100644
index 000000000..f6a2f1666
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/EmbeddingResult.kt
@@ -0,0 +1,5 @@
+package com.xebia.functional.xef.llm.models.embeddings
+
+import com.xebia.functional.xef.llm.models.usage.Usage
+
+data class EmbeddingResult(val data: List<Embedding>, val usage: Usage)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/RequestConfig.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/RequestConfig.kt
new file mode 100644
index 000000000..2f32053b5
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/embeddings/RequestConfig.kt
@@ -0,0 +1,9 @@
+package com.xebia.functional.xef.llm.models.embeddings
+
+import kotlin.jvm.JvmInline
+
+data class RequestConfig(val model: EmbeddingModel, val user: User) {
+  companion object {
+    @JvmInline value class User(val id: String)
+  }
+}

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/CFunction.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/CFunction.kt
new file mode 100644
index 000000000..04e5940af
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/CFunction.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.functions
+
+data class CFunction(val name: String, val description: String, val parameters: String)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/FunctionCall.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/FunctionCall.kt
new file mode 100644
index 000000000..1da42370f
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/functions/FunctionCall.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.functions
+
+data class FunctionCall(val name: String?, val arguments: String?)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImageGenerationUrl.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImageGenerationUrl.kt
new file mode 100644
index 000000000..42da4f9fa
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImageGenerationUrl.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.images
+
+data class ImageGenerationUrl(val url: String)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationRequest.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationRequest.kt
new file mode 100644
index 000000000..2c76f887a
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationRequest.kt
@@ -0,0 +1,9 @@
+package com.xebia.functional.xef.llm.models.images
+
+data class ImagesGenerationRequest(
+  val prompt: String,
+  val numberImages: Int = 1,
+  val size: String = "1024x1024",
+  val responseFormat: String = "url",
+  val user: String? = null
+)
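Image generation goes through the same provider-neutral AIClient interface; the defaults above mirror OpenAI's (one 1024x1024 image, url response format), but any client implementation may interpret them. A usage sketch (prompt text is illustrative):

import com.xebia.functional.xef.llm.AIClient
import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest

// Ask the client for two images and return their URLs.
suspend fun posterUrls(client: AIClient): List<String> =
  client
    .createImages(ImagesGenerationRequest(prompt = "A watercolor fox", numberImages = 2))
    .data
    .map { it.url }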
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationResponse.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationResponse.kt
new file mode 100644
index 000000000..e6ee8c27a
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/images/ImagesGenerationResponse.kt
@@ -0,0 +1,3 @@
+package com.xebia.functional.xef.llm.models.images
+
+data class ImagesGenerationResponse(val data: List<ImageGenerationUrl>)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionChoice.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionChoice.kt
new file mode 100644
index 000000000..7c6e80604
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionChoice.kt
@@ -0,0 +1,8 @@
+package com.xebia.functional.xef.llm.models.text
+
+data class CompletionChoice(
+  val text: String,
+  val index: Int,
+  val logprobs: Int? = null,
+  val finishReason: String?
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionRequest.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionRequest.kt
new file mode 100644
index 000000000..28bc34285
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionRequest.kt
@@ -0,0 +1,20 @@
+package com.xebia.functional.xef.llm.models.text
+
+data class CompletionRequest(
+  val model: String,
+  val user: String,
+  val prompt: String? = null,
+  val suffix: String? = null,
+  val maxTokens: Int? = null,
+  val temperature: Double? = null,
+  val topP: Double? = null,
+  val n: Int? = null,
+  val stream: Boolean? = null,
+  val logprobs: Int? = null,
+  val echo: Boolean? = null,
+  val stop: List<String>? = null,
+  val presencePenalty: Double = 0.0,
+  val frequencyPenalty: Double = 0.0,
+  val bestOf: Int = 1,
+  val logitBias: Map<String, Int> = emptyMap(),
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionResult.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionResult.kt
new file mode 100644
index 000000000..792ed19c0
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/text/CompletionResult.kt
@@ -0,0 +1,12 @@
+package com.xebia.functional.xef.llm.models.text
+
+import com.xebia.functional.xef.llm.models.usage.Usage
+
+data class CompletionResult(
+  val id: String,
+  val `object`: String,
+  val created: Long,
+  val model: String,
+  val choices: List<CompletionChoice>,
+  val usage: Usage
+)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/usage/Usage.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/usage/Usage.kt
new file mode 100644
index 000000000..cfdfe86ce
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/usage/Usage.kt
@@ -0,0 +1,7 @@
+package com.xebia.functional.xef.llm.models.usage
+
+data class Usage(val promptTokens: Int?, val completionTokens: Int? = null, val totalTokens: Int?)
{ + companion object { + val ZERO: Usage = Usage(0, 0, 0) + } +} diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/AIClient.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/AIClient.kt deleted file mode 100644 index 054e59260..000000000 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/AIClient.kt +++ /dev/null @@ -1,18 +0,0 @@ -package com.xebia.functional.xef.llm.openai - -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationRequest -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse - -interface AIClient { - suspend fun createCompletion(request: CompletionRequest): CompletionResult - - suspend fun createChatCompletion(request: ChatCompletionRequest): ChatCompletionResponse - - suspend fun createChatCompletionWithFunctions( - request: ChatCompletionRequestWithFunctions - ): ChatCompletionResponseWithFunctions - - suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult - - suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse -} diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/KtorOpenAIClient.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/KtorOpenAIClient.kt deleted file mode 100644 index b3204fa57..000000000 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/KtorOpenAIClient.kt +++ /dev/null @@ -1,133 +0,0 @@ -package com.xebia.functional.xef.llm.openai - -import com.xebia.functional.xef.configure -import com.xebia.functional.xef.env.OpenAIConfig -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationRequest -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse -import io.github.oshai.kotlinlogging.KLogger -import io.github.oshai.kotlinlogging.KotlinLogging -import io.ktor.client.* -import io.ktor.client.call.* -import io.ktor.client.plugins.* -import io.ktor.client.plugins.contentnegotiation.* -import io.ktor.client.request.* -import io.ktor.client.statement.* -import io.ktor.http.* -import io.ktor.serialization.kotlinx.json.* -import kotlinx.serialization.Serializable - -@OptIn(ExperimentalStdlibApi::class) -class KtorOpenAIClient(private val config: OpenAIConfig) : AIClient, AutoCloseable { - - private val httpClient: HttpClient = HttpClient { - install(HttpTimeout) - install(ContentNegotiation) { json() } - defaultRequest { url(config.baseUrl) } - } - - private val logger: KLogger = KotlinLogging.logger {} - - override suspend fun createCompletion(request: CompletionRequest): CompletionResult { - val response = - httpClient.post { - url { path("completions") } - configure(config.token, request) - timeout { requestTimeoutMillis = config.requestTimeoutMillis } - } - - val body: CompletionResult = response.bodyOrError() - with(body.usage) { - logger.debug { - "Completion Tokens :: prompt: $promptTokens, completion: $completionTokens, total: $totalTokens" - } - } - return body - } - - override suspend fun createChatCompletion( - request: ChatCompletionRequest - ): ChatCompletionResponse { - val response = - httpClient.post { - url { path("chat/completions") } - configure(config.token, request) - timeout { requestTimeoutMillis = config.requestTimeoutMillis } - } - - val body: ChatCompletionResponse = response.bodyOrError() - with(body.usage) { - logger.debug { - "Chat Completion Tokens :: prompt: $promptTokens, completion: $completionTokens, total: $totalTokens" - } - } - return body - } - - override suspend fun 
createChatCompletionWithFunctions(
-    request: ChatCompletionRequestWithFunctions
-  ): ChatCompletionResponseWithFunctions {
-
-    val response =
-      httpClient.post {
-        url { path("chat/completions") }
-        configure(config.token, request)
-        timeout { requestTimeoutMillis = config.requestTimeoutMillis }
-      }
-
-    val body: ChatCompletionResponseWithFunctions = response.bodyOrError()
-    with(body.usage) {
-      logger.debug {
-        "Chat Completion Tokens :: prompt: $promptTokens, completion: $completionTokens, total: $totalTokens"
-      }
-    }
-    return body
-  }
-
-  override suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult =
-    httpClient
-      .post {
-        url { path("embeddings") }
-        configure(config.token, request)
-        timeout { requestTimeoutMillis = config.requestTimeoutMillis }
-      }
-      .bodyOrError()
-
-  override suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse =
-    httpClient
-      .post {
-        url { path("images/generations") }
-        configure(config.token, request)
-        timeout { requestTimeoutMillis = config.requestTimeoutMillis }
-      }
-      .bodyOrError()
-
-  override fun close() = httpClient.close()
-}
-
-internal suspend inline fun <reified T> HttpResponse.bodyOrError(): T =
-  if (status == HttpStatusCode.OK) body() else throw OpenAIClientException(status, body())
-
-class OpenAIClientException(val httpStatusCode: HttpStatusCode, val error: Error) :
-  IllegalStateException(
-    """
-
-    OpenAI error: ${error.error.type}
-    message: ${error.error.message}
-    StatusCode: $httpStatusCode
-    param: ${error.error.param}
-    code: ${error.error.code}
-
-    """
-      .trimIndent()
-  )
-
-@Serializable
-data class Error(val error: Description) {
-  @Serializable
-  data class Description(
-    val message: String,
-    val type: String,
-    val param: String? = null,
-    val code: String?
= null - ) -} diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/MockAIClient.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/MockAIClient.kt deleted file mode 100644 index 43881e5cd..000000000 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/MockAIClient.kt +++ /dev/null @@ -1,68 +0,0 @@ -package com.xebia.functional.xef.llm.openai - -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationRequest -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse - -class MockOpenAIClient( - private val completion: (CompletionRequest) -> CompletionResult = { - throw NotImplementedError("completion not implemented") - }, - private val chatCompletion: (ChatCompletionRequest) -> ChatCompletionResponse = { - throw NotImplementedError("chat completion not implemented") - }, - private val chatCompletionRequestWithFunctions: - (ChatCompletionRequestWithFunctions) -> ChatCompletionResponseWithFunctions = - { - throw NotImplementedError("chat completion not implemented") - }, - private val embeddings: (EmbeddingRequest) -> EmbeddingResult = ::nullEmbeddings, - private val images: (ImagesGenerationRequest) -> ImagesGenerationResponse = { - throw NotImplementedError("images not implemented") - }, -) : AIClient { - override suspend fun createCompletion(request: CompletionRequest): CompletionResult = - completion(request) - - override suspend fun createChatCompletion( - request: ChatCompletionRequest - ): ChatCompletionResponse = chatCompletion(request) - - override suspend fun createChatCompletionWithFunctions( - request: ChatCompletionRequestWithFunctions - ): ChatCompletionResponseWithFunctions = chatCompletionRequestWithFunctions(request) - - override suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult = - embeddings(request) - - override suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse = - images(request) -} - -fun nullEmbeddings(request: EmbeddingRequest): EmbeddingResult { - val results = request.input.mapIndexed { index, s -> Embedding(s, listOf(0F), index) } - return EmbeddingResult(request.model, "", results, Usage.ZERO) -} - -fun simpleMockAIClient(execute: (String) -> String): MockOpenAIClient = - MockOpenAIClient( - completion = { req -> - val request = "${req.prompt.orEmpty()} ${req.suffix.orEmpty()}" - val response = execute(request) - val result = CompletionChoice(response, 0, null, "end") - val requestTokens = request.split(' ').size.toLong() - val responseTokens = response.split(' ').size.toLong() - val usage = Usage(requestTokens, responseTokens, requestTokens + responseTokens) - CompletionResult("FakeID123", "", 0, req.model, listOf(result), usage) - }, - chatCompletion = { req -> - val responses = - req.messages.mapIndexed { ix, msg -> - val response = execute(msg.content) - Choice(Message(msg.role, response), "end", ix) - } - val requestTokens = req.messages.sumOf { it.content.split(' ').size.toLong() } - val responseTokens = responses.sumOf { it.message.content.split(' ').size.toLong() } - val usage = Usage(requestTokens, responseTokens, requestTokens + responseTokens) - ChatCompletionResponse("FakeID123", "", 0, req.model, usage, responses) - } - ) diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImageGenerationUrl.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImageGenerationUrl.kt deleted file mode 100644 index ec11bb95d..000000000 --- 
a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImageGenerationUrl.kt
+++ /dev/null
@@ -1,5 +0,0 @@
-package com.xebia.functional.xef.llm.openai.images
-
-import kotlinx.serialization.Serializable
-
-@Serializable data class ImageGenerationUrl(val url: String)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationRequest.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationRequest.kt
deleted file mode 100644
index ab5639afa..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationRequest.kt
+++ /dev/null
@@ -1,13 +0,0 @@
-package com.xebia.functional.xef.llm.openai.images
-
-import kotlinx.serialization.SerialName
-import kotlinx.serialization.Serializable
-
-@Serializable
-data class ImagesGenerationRequest(
-  val prompt: String,
-  @SerialName("n") val numberImages: Int = 1,
-  val size: String = "1024x1024",
-  @SerialName("response_format") val responseFormat: String = "url",
-  val user: String? = null
-)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationResponse.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationResponse.kt
deleted file mode 100644
index a3f8b1744..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/images/ImagesGenerationResponse.kt
+++ /dev/null
@@ -1,6 +0,0 @@
-package com.xebia.functional.xef.llm.openai.images
-
-import kotlinx.serialization.Serializable
-
-@Serializable
-data class ImagesGenerationResponse(val created: Long, val data: List<ImageGenerationUrl>)

diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/models.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/models.kt
deleted file mode 100644
index 3ae7feaa0..000000000
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/openai/models.kt
+++ /dev/null
@@ -1,265 +0,0 @@
-package com.xebia.functional.xef.llm.openai
-
-import com.xebia.functional.tokenizer.ModelType
-import kotlin.jvm.JvmInline
-import kotlin.jvm.JvmStatic
-import kotlinx.serialization.ExperimentalSerializationApi
-import kotlinx.serialization.KSerializer
-import kotlinx.serialization.SerialName
-import kotlinx.serialization.Serializable
-import kotlinx.serialization.descriptors.PrimitiveKind
-import kotlinx.serialization.descriptors.PrimitiveSerialDescriptor
-import kotlinx.serialization.encoding.Decoder
-import kotlinx.serialization.encoding.Encoder
-import kotlinx.serialization.json.JsonEncoder
-import kotlinx.serialization.json.JsonUnquotedLiteral
-
-enum class EmbeddingModel(val modelName: String) {
-  TextEmbeddingAda002("text-embedding-ada-002")
-}
-
-data class RequestConfig(val model: EmbeddingModel, val user: User) {
-  companion object {
-    @JvmInline value class User(val id: String)
-  }
-}
-
-@Serializable
-data class CompletionChoice(
-  val text: String,
-  val index: Int,
-  val logprobs: Int? = null,
-  @SerialName("finish_reason") val finishReason: String
-)
-
-@Serializable
-data class CompletionResult(
-  val id: String,
-  @SerialName("object") val `object`: String,
-  val created: Long,
-  val model: String,
-  val choices: List<CompletionChoice>,
-  val usage: Usage
-)
-
-@Serializable
-data class CompletionRequest(
-  val model: String,
-  val user: String,
-  val prompt: String? = null,
-  val suffix: String? = null,
-  @SerialName("max_tokens") val maxTokens: Int? = null,
-  val temperature: Double? = null,
-  @SerialName("top_p") val topP: Double?
= null, - val n: Int? = null, - val stream: Boolean? = null, - val logprobs: Int? = null, - val echo: Boolean? = null, - val stop: List? = null, - @SerialName("presence_penalty") val presencePenalty: Double = 0.0, - @SerialName("frequency_penalty") val frequencyPenalty: Double = 0.0, - @SerialName("best_of") val bestOf: Int = 1, - @SerialName("logit_bias") val logitBias: Map = emptyMap(), -) - -@Serializable -data class ChatCompletionRequest( - val model: String, - val messages: List, - val temperature: Double = 0.0, - @SerialName("top_p") val topP: Double = 1.0, - val n: Int = 1, - val stream: Boolean = false, - val stop: List? = null, - @SerialName("max_tokens") val maxTokens: Int? = null, - @SerialName("presence_penalty") val presencePenalty: Double = 0.0, - @SerialName("frequency_penalty") val frequencyPenalty: Double = 0.0, - @SerialName("logit_bias") val logitBias: Map = emptyMap(), - val user: String? -) - -@Serializable -data class ChatCompletionRequestWithFunctions( - val model: String, - val messages: List, - val functions: List = emptyList(), - val temperature: Double = 0.0, - @SerialName("top_p") val topP: Double = 1.0, - val n: Int = 1, - val stream: Boolean = false, - val stop: List? = null, - @SerialName("max_tokens") val maxTokens: Int? = null, - @SerialName("presence_penalty") val presencePenalty: Double = 0.0, - @SerialName("frequency_penalty") val frequencyPenalty: Double = 0.0, - @SerialName("logit_bias") val logitBias: Map = emptyMap(), - val user: String?, - @SerialName("function_call") val functionCall: Map, -) - -/* -"functions": [ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. 
San Francisco, CA" - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"] - } - }, - "required": ["location"] - } - } - ] - */ -@Serializable -data class CFunction( - val name: String, - val description: String, - val parameters: @Serializable(with = RawJsonStringSerializer::class) String -) - -@OptIn(ExperimentalSerializationApi::class) -private object RawJsonStringSerializer : KSerializer { - override val descriptor = - PrimitiveSerialDescriptor( - "com.xebia.functional.xef.llm.openai.functions.RawJsonString", - PrimitiveKind.STRING - ) - - override fun deserialize(decoder: Decoder): String = decoder.decodeString() - - override fun serialize(encoder: Encoder, value: String) = - when (encoder) { - is JsonEncoder -> encoder.encodeJsonElement(JsonUnquotedLiteral(value)) - else -> encoder.encodeString(value) - } -} - -@Serializable -data class ChatCompletionResponse( - val id: String, - val `object`: String, - val created: Long, - val model: String, - val usage: Usage, - val choices: List -) - -@Serializable -data class ChatCompletionResponseWithFunctions( - val id: String, - val `object`: String, - val created: Long, - val model: String, - val usage: Usage, - val choices: List -) - -@Serializable -data class ChoiceWithFunctions( - val message: MessageWithFunctionCall, - @SerialName("finish_reason") val finishReason: String, - val index: Int -) - -@Serializable -data class Choice( - val message: Message, - @SerialName("finish_reason") val finishReason: String, - val index: Int -) - -@Serializable data class FunctionCall(val name: String, val arguments: String) - -enum class Role { - system, - user, - assistant, - function -} - -@Serializable -data class Message(val role: String, val content: String, val name: String? = Role.assistant.name) - -@Serializable -data class MessageWithFunctionCall( - val role: String, - val content: String? = null, - @SerialName("function_call") val functionCall: FunctionCall, - val name: String? = Role.assistant.name -) - -@Serializable -data class EmbeddingRequest(val model: String, val input: List, val user: String) - -@Serializable -data class EmbeddingResult( - val model: String, - @SerialName("object") val `object`: String, - val data: List, - val usage: Usage -) - -@Serializable class Embedding(val `object`: String, val embedding: List, val index: Int) - -@Serializable -data class Usage( - @SerialName("prompt_tokens") val promptTokens: Long, - @SerialName("completion_tokens") val completionTokens: Long? 
= null, - @SerialName("total_tokens") val totalTokens: Long -) { - companion object { - val ZERO: Usage = Usage(0, 0, 0) - } -} - -data class LLMModel(val name: String, val kind: Kind, val modelType: ModelType) { - - enum class Kind { - Completion, - Chat, - ChatWithFunctions, - } - - companion object { - @JvmStatic val GPT_4 = LLMModel("gpt-4", Kind.Chat, ModelType.GPT_4) - - @JvmStatic val GPT_4_0314 = LLMModel("gpt-4-0314", Kind.Chat, ModelType.GPT_4) - - @JvmStatic val GPT_4_32K = LLMModel("gpt-4-32k", Kind.Chat, ModelType.GPT_4_32K) - - @JvmStatic val GPT_3_5_TURBO = LLMModel("gpt-3.5-turbo", Kind.Chat, ModelType.GPT_3_5_TURBO) - - @JvmStatic - val GPT_3_5_TURBO_16K = LLMModel("gpt-3.5-turbo-16k", Kind.Chat, ModelType.GPT_3_5_TURBO_16_K) - - @JvmStatic - val GPT_3_5_TURBO_FUNCTIONS = - LLMModel("gpt-3.5-turbo-0613", Kind.ChatWithFunctions, ModelType.GPT_3_5_TURBO_FUNCTIONS) - - @JvmStatic - val GPT_3_5_TURBO_0301 = LLMModel("gpt-3.5-turbo-0301", Kind.Chat, ModelType.GPT_3_5_TURBO) - - @JvmStatic - val TEXT_DAVINCI_003 = LLMModel("text-davinci-003", Kind.Completion, ModelType.TEXT_DAVINCI_003) - - @JvmStatic - val TEXT_DAVINCI_002 = LLMModel("text-davinci-002", Kind.Completion, ModelType.TEXT_DAVINCI_002) - - @JvmStatic - val TEXT_CURIE_001 = - LLMModel("text-curie-001", Kind.Completion, ModelType.TEXT_SIMILARITY_CURIE_001) - - @JvmStatic - val TEXT_BABBAGE_001 = LLMModel("text-babbage-001", Kind.Completion, ModelType.TEXT_BABBAGE_001) - - @JvmStatic val TEXT_ADA_001 = LLMModel("text-ada-001", Kind.Completion, ModelType.TEXT_ADA_001) - } -} diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/vectorstores/LocalVectorStore.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/vectorstores/LocalVectorStore.kt index 302234b93..8aa40ec9a 100644 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/vectorstores/LocalVectorStore.kt +++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/vectorstores/LocalVectorStore.kt @@ -4,8 +4,8 @@ import arrow.atomic.Atomic import arrow.atomic.getAndUpdate import com.xebia.functional.xef.embeddings.Embedding import com.xebia.functional.xef.embeddings.Embeddings -import com.xebia.functional.xef.llm.openai.EmbeddingModel -import com.xebia.functional.xef.llm.openai.RequestConfig +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingModel +import com.xebia.functional.xef.llm.models.embeddings.RequestConfig import kotlin.math.sqrt private data class State( @@ -25,7 +25,7 @@ private constructor(private val embeddings: Embeddings, private val state: Atomi constructor(embeddings: Embeddings) : this(embeddings, Atomic(State.empty())) private val requestConfig = - RequestConfig(EmbeddingModel.TextEmbeddingAda002, RequestConfig.Companion.User("user")) + RequestConfig(EmbeddingModel.TEXT_EMBEDDING_ADA_002, RequestConfig.Companion.User("user")) override suspend fun addTexts(texts: List) { val embeddingsList = diff --git a/examples/kotlin/build.gradle.kts b/examples/kotlin/build.gradle.kts index 224ba01ef..5e5fd632f 100644 --- a/examples/kotlin/build.gradle.kts +++ b/examples/kotlin/build.gradle.kts @@ -22,6 +22,7 @@ dependencies { implementation(projects.xefSql) implementation(projects.xefTokenizer) implementation(projects.xefGpt4all) + implementation(projects.xefOpenai) implementation(libs.kotlinx.serialization.json) implementation(libs.logback) implementation(libs.klogging) diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/CustomRuntime.kt 
b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/CustomRuntime.kt index 0c6847e82..ec918c472 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/CustomRuntime.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/CustomRuntime.kt @@ -1,6 +1,10 @@ package com.xebia.functional.xef.auto -import com.xebia.functional.xef.llm.openai.simpleMockAIClient +import com.xebia.functional.xef.auto.llm.openai.MockAIScope +import com.xebia.functional.xef.auto.llm.openai.simpleMockAIClient +import com.xebia.functional.xef.embeddings.Embedding +import com.xebia.functional.xef.embeddings.Embeddings +import com.xebia.functional.xef.llm.models.embeddings.RequestConfig suspend fun main() { val program = ai { @@ -10,10 +14,23 @@ suspend fun main() { program.getOrElse(customRuntime()) { println(it) } } -private fun customRuntime(): AIRuntime = - AIRuntime { block -> +private fun fakeEmbeddings(): Embeddings = object : Embeddings { + override suspend fun embedDocuments( + texts: List, + chunkSize: Int?, + requestConfig: RequestConfig + ): List = emptyList() + + override suspend fun embedQuery(text: String, requestConfig: RequestConfig): List = + emptyList() +} + +private fun customRuntime(): AIRuntime { + val client = simpleMockAIClient { it } + return AIRuntime(client, fakeEmbeddings()) { block -> MockAIScope( - simpleMockAIClient { it }, + client, block ) { throw it } } +} diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/Population.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/Population.kt index 2d138bb0a..1572f1c9e 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/Population.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/Population.kt @@ -13,9 +13,6 @@ data class Image( suspend fun main() = ai { - val cadiz: Population = prompt("Population of Cádiz, Spain.") - val seattle: Population = prompt("Population of Seattle, WA.") - val img: Image = image("A hybrid city of Cádiz, Spain and Seattle, US.") + val img: Image = image("A hybrid city of Cádiz, Spain and Seattle, US.") println(img) - println("The population of Cádiz is ${cadiz.size} and the population of Seattle is ${seattle.size}") }.getOrElse { println(it) } diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/ControlSignal.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/ControlSignal.kt index 5b637200d..7c460a5f4 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/ControlSignal.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/ControlSignal.kt @@ -23,7 +23,6 @@ internal suspend fun CoreAIScope.controlSignal(memory: Memory): ControlSi |4. Ensure the guidance is actionable. |5. Ensure the guidance accounts for previous answers in the `history`. | - |${remindJSONSchema()} """.trimMargin() return prompt(guidancePrompt).also { println("🧠 Generated control signal: ${truncateText(it.value)}") diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Critique.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Critique.kt index 2ad67921a..4f1d5c4bf 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Critique.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Critique.kt @@ -25,7 +25,6 @@ internal suspend fun CoreAIScope.critique(memory: Memory, currentSolution |Instructions: |1. Provide a critique and determine if the answer truly accomplishes the goal.
| - |${remindJSONSchema()} """.trimMargin() ) } diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Main.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Main.kt index 0c2fd9d37..f8a2cf499 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Main.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Main.kt @@ -15,7 +15,7 @@ suspend fun main() = ai { |3. You never use `for` `while` or loops in general, prefer tail recursion. |4. You never use mutable state. | - |Find the problems in this code and provide a Github suggestion code fence with the `diff` to fix it. + |This code is unsafe. Find the problems in this code and provide a Github suggestion code fence with the `diff` to fix it. | |```kotlin |fun access(list: List, index: Int): Int { diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Rendering.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Rendering.kt index a074f21ad..9bca9644e 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Rendering.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Rendering.kt @@ -9,15 +9,6 @@ fun truncateText(text: String, limit: Int = 150): String { }.replace("\n", " ") } -internal fun remindJSONSchema(): String = - """|IMPORTANT INSTRUCTIONS: - |1. Provide your response in application/json output in the json schema provided below. - |2. If you don't provide your response in the json schema provided below, - | the program will fail and someone may get hurt. - |3. I repeat, if you don't provide your response in the json schema provided below, - | the program will fail and someone may get hurt. - |""".trimMargin() - internal fun renderHistory(memory: Memory<*>): String = """|```history |${ memory.history.joinToString("\n") { diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Solution.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Solution.kt index 45e1c646a..f6cfec90e 100644 --- a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Solution.kt +++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/tot/Solution.kt @@ -44,7 +44,6 @@ internal suspend fun CoreAIScope.solution( |9. If the solution is not valid set the `isValid` field to `false` and the `value` field to `null`. |10. If the solution is valid set the `isValid` field to `true` and the `value` field to the value of the solution. 
| - |${remindJSONSchema()} |""".trimMargin() return prompt(Prompt(enhancedPrompt), serializer).also { println("🤖 Generated solution: ${truncateText(it.answer)}") diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 3f2e3e0fa..054923742 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -32,6 +32,7 @@ pdfbox = "2.0.28" mysql = "8.0.33" semverGradle = "0.5.0-alpha.2" scala = "3.3.0" +openai-client-version = "3.3.0" [libraries] arrow-core = { module = "io.arrow-kt:arrow-core", version.ref = "arrow" } @@ -76,6 +77,7 @@ assertj = { module = "org.assertj:assertj-core", version.ref = "assertj" } apache-pdf-box = { module = "org.apache.pdfbox:pdfbox", version.ref = "pdfbox" } jdbc-mysql-connector = { module = "mysql:mysql-connector-java", version.ref = "mysql" } scala-lang = { module = "org.scala-lang:scala3-library_3", version.ref = "scala" } +openai-client = { module = "com.aallam.openai:openai-client", version.ref = "openai-client-version" } [bundles] arrow = [ diff --git a/integrations/lucene/src/main/kotlin/com/xebia/functional/xef/vectorstores/Lucene.kt b/integrations/lucene/src/main/kotlin/com/xebia/functional/xef/vectorstores/Lucene.kt index 4aba4042e..2cc991d88 100644 --- a/integrations/lucene/src/main/kotlin/com/xebia/functional/xef/vectorstores/Lucene.kt +++ b/integrations/lucene/src/main/kotlin/com/xebia/functional/xef/vectorstores/Lucene.kt @@ -2,17 +2,25 @@ package com.xebia.functional.xef.vectorstores import com.xebia.functional.xef.embeddings.Embedding import com.xebia.functional.xef.embeddings.Embeddings -import com.xebia.functional.xef.llm.openai.EmbeddingModel -import com.xebia.functional.xef.llm.openai.RequestConfig +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingModel +import com.xebia.functional.xef.llm.models.embeddings.RequestConfig import org.apache.lucene.analysis.standard.StandardAnalyzer -import org.apache.lucene.document.* -import java.nio.file.Path -import org.apache.lucene.index.* +import org.apache.lucene.document.Document +import org.apache.lucene.document.Field +import org.apache.lucene.document.KnnFloatVectorField +import org.apache.lucene.document.TextField +import org.apache.lucene.index.DirectoryReader +import org.apache.lucene.index.IndexWriter +import org.apache.lucene.index.IndexWriterConfig +import org.apache.lucene.index.VectorSimilarityFunction import org.apache.lucene.queries.mlt.MoreLikeThis -import org.apache.lucene.search.* +import org.apache.lucene.search.IndexSearcher +import org.apache.lucene.search.KnnFloatVectorQuery +import org.apache.lucene.search.TopDocs import org.apache.lucene.store.Directory import org.apache.lucene.store.MMapDirectory import java.io.StringReader +import java.nio.file.Path open class Lucene( private val writer: IndexWriter, @@ -21,7 +29,7 @@ open class Lucene( ) : VectorStore, AutoCloseable { private val requestConfig = - RequestConfig(EmbeddingModel.TextEmbeddingAda002, RequestConfig.Companion.User("user")) + RequestConfig(EmbeddingModel.TEXT_EMBEDDING_ADA_002, RequestConfig.Companion.User("user")) override suspend fun addTexts(texts: List) { texts.forEach { diff --git a/integrations/postgresql/src/main/kotlin/com/xebia/functional/xef/vectorstores/PostgreSQLVectorStore.kt b/integrations/postgresql/src/main/kotlin/com/xebia/functional/xef/vectorstores/PostgreSQLVectorStore.kt index 45ac65da3..183aeb1fd 100644 --- a/integrations/postgresql/src/main/kotlin/com/xebia/functional/xef/vectorstores/PostgreSQLVectorStore.kt +++ 
b/integrations/postgresql/src/main/kotlin/com/xebia/functional/xef/vectorstores/PostgreSQLVectorStore.kt @@ -2,11 +2,11 @@ package com.xebia.functional.xef.vectorstores import com.xebia.functional.xef.embeddings.Embedding import com.xebia.functional.xef.embeddings.Embeddings -import com.xebia.functional.xef.llm.openai.RequestConfig +import com.xebia.functional.xef.llm.models.embeddings.RequestConfig import com.xebia.functional.xef.vectorstores.postgresql.* -import javax.sql.DataSource import kotlinx.uuid.UUID import kotlinx.uuid.generateUUID +import javax.sql.DataSource class PGVectorStore( private val vectorSize: Int, diff --git a/integrations/postgresql/src/test/kotlin/xef/PGVectorStoreSpec.kt b/integrations/postgresql/src/test/kotlin/xef/PGVectorStoreSpec.kt index 4de9e8fec..b0a413db0 100644 --- a/integrations/postgresql/src/test/kotlin/xef/PGVectorStoreSpec.kt +++ b/integrations/postgresql/src/test/kotlin/xef/PGVectorStoreSpec.kt @@ -2,8 +2,8 @@ package xef import com.xebia.functional.xef.embeddings.Embedding import com.xebia.functional.xef.embeddings.Embeddings -import com.xebia.functional.xef.llm.openai.EmbeddingModel -import com.xebia.functional.xef.llm.openai.RequestConfig +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingModel +import com.xebia.functional.xef.llm.models.embeddings.RequestConfig import com.xebia.functional.xef.vectorstores.PGVectorStore import com.xebia.functional.xef.vectorstores.postgresql.PGDistanceStrategy import com.zaxxer.hikari.HikariConfig @@ -45,7 +45,7 @@ class PGVectorStoreSpec : distanceStrategy = PGDistanceStrategy.Euclidean, preDeleteCollection = false, requestConfig = - RequestConfig(EmbeddingModel.TextEmbeddingAda002, RequestConfig.Companion.User("user")), + RequestConfig(EmbeddingModel.TEXT_EMBEDDING_ADA_002, RequestConfig.Companion.User("user")), chunckSize = null ) diff --git a/java/build.gradle.kts b/java/build.gradle.kts index 88a7864b6..ef3cc0839 100644 --- a/java/build.gradle.kts +++ b/java/build.gradle.kts @@ -8,6 +8,7 @@ plugins { dependencies { implementation(projects.xefCore) + implementation(projects.xefOpenai) implementation(projects.xefPdf) implementation("com.fasterxml.jackson.core:jackson-databind:2.15.2") implementation("com.fasterxml.jackson.module:jackson-module-jsonSchema:2.15.2") @@ -15,4 +16,4 @@ dependencies { tasks.withType().configureEach { useJUnit() -} \ No newline at end of file +} diff --git a/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java b/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java index 378e9a6fb..ac76ddea8 100644 --- a/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java +++ b/java/src/main/java/com/xebia/functional/xef/java/auto/AIScope.java @@ -4,27 +4,27 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.module.jsonSchema.JsonSchema; import com.fasterxml.jackson.module.jsonSchema.JsonSchemaGenerator; +import com.xebia.functional.xef.auto.AIRuntime; import com.xebia.functional.xef.auto.CoreAIScope; -import com.xebia.functional.xef.embeddings.Embeddings; -import com.xebia.functional.xef.embeddings.OpenAIEmbeddings; -import com.xebia.functional.xef.env.OpenAIConfig; -import com.xebia.functional.xef.llm.openai.KtorOpenAIClient; -import com.xebia.functional.xef.llm.openai.LLMModel; -import com.xebia.functional.xef.llm.openai.CFunction; -import com.xebia.functional.xef.llm.openai.images.ImageGenerationUrl; -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse; +import 
com.xebia.functional.xef.auto.llm.openai.OpenAIRuntime; +import com.xebia.functional.xef.llm.AIClient; +import com.xebia.functional.xef.llm.LLM; +import com.xebia.functional.xef.llm.LLMModel; +import com.xebia.functional.xef.llm.models.functions.CFunction; +import com.xebia.functional.xef.llm.models.images.ImageGenerationUrl; +import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse; +import com.xebia.functional.xef.pdf.PDFLoaderKt; import com.xebia.functional.xef.textsplitters.TextSplitter; import com.xebia.functional.xef.vectorstores.LocalVectorStore; import com.xebia.functional.xef.vectorstores.VectorStore; import kotlin.collections.CollectionsKt; import kotlin.coroutines.Continuation; import kotlin.jvm.functions.Function1; -import com.xebia.functional.xef.pdf.PDFLoaderKt; import kotlinx.coroutines.CoroutineScope; -import kotlinx.coroutines.CoroutineStart; +import kotlinx.coroutines.CoroutineScopeKt; import kotlinx.coroutines.ExecutorsKt; import kotlinx.coroutines.JobKt; -import kotlinx.coroutines.CoroutineScopeKt; +import kotlinx.coroutines.CoroutineStart; import kotlinx.coroutines.future.FutureKt; import org.jetbrains.annotations.NotNull; @@ -41,31 +41,26 @@ public class AIScope implements AutoCloseable { private final CoreAIScope scope; private final ObjectMapper om; private final JsonSchemaGenerator schemaGen; - private final KtorOpenAIClient client; + private final AIClient client; private final ExecutorService executorService; private final CoroutineScope coroutineScope; - public AIScope(ObjectMapper om, OpenAIConfig config, ExecutorService executorService) { + public AIScope(ObjectMapper om, AIRuntime runtime, ExecutorService executorService) { this.om = om; this.executorService = executorService; this.coroutineScope = () -> ExecutorsKt.from(executorService).plus(JobKt.Job(null)); this.schemaGen = new JsonSchemaGenerator(om); - this.client = new KtorOpenAIClient(config); - Embeddings embeddings = new OpenAIEmbeddings(config, client); - VectorStore vectorStore = new LocalVectorStore(embeddings); - this.scope = new CoreAIScope(LLMModel.getGPT_3_5_TURBO(), LLMModel.getGPT_3_5_TURBO_FUNCTIONS(), client, vectorStore, embeddings, 3, "user", false, 0.4, 1, 20, 500); - } - - public AIScope(OpenAIConfig config) { - this(new ObjectMapper(), config, Executors.newCachedThreadPool(new AIScopeThreadFactory())); + this.client = runtime.getClient(); + VectorStore vectorStore = new LocalVectorStore(runtime.getEmbeddings()); + this.scope = new CoreAIScope(LLMModel.getGPT_3_5_TURBO(), LLMModel.getGPT_3_5_TURBO_FUNCTIONS(), client, vectorStore, runtime.getEmbeddings(), 3, "user", false, 0.4, 1, 20, 500); } - public AIScope(OpenAIConfig config, ExecutorService executorService) { - this(new ObjectMapper(), config, executorService); + public AIScope(AIRuntime runtime, ExecutorService executorService) { + this(new ObjectMapper(), runtime, executorService); } public AIScope() { - this(new ObjectMapper(), new OpenAIConfig(), Executors.newCachedThreadPool(new AIScopeThreadFactory())); + this(new ObjectMapper(), OpenAIRuntime.defaults(), Executors.newCachedThreadPool(new AIScopeThreadFactory())); } private AIScope(CoreAIScope nested, AIScope outer) { @@ -81,7 +76,7 @@ public CompletableFuture prompt(String prompt, Class cls) { return prompt(prompt, cls, scope.getMaxDeserializationAttempts(), scope.getDefaultSerializationModel(), scope.getUser(), scope.getEcho(), scope.getNumberOfPredictions(), scope.getTemperature(), scope.getDocsInContext(), scope.getMinResponseTokens()); } - public 
CompletableFuture prompt(String prompt, Class cls, Integer maxAttempts, LLMModel llmModel, String user, Boolean echo, Integer n, Double temperature, Integer bringFromContext, Integer minResponseTokens) { + public CompletableFuture prompt(String prompt, Class cls, Integer maxAttempts, LLM.ChatWithFunctions llmModel, String user, Boolean echo, Integer n, Double temperature, Integer bringFromContext, Integer minResponseTokens) { Function1 decoder = json -> { try { return om.readValue(json, cls); @@ -108,7 +103,7 @@ public CompletableFuture prompt(String prompt, Class cls, Integer maxA return future(continuation -> scope.promptWithSerializer(prompt, functions, decoder, maxAttempts, llmModel, user, echo, n, temperature, bringFromContext, minResponseTokens, continuation)); } - public CompletableFuture> promptMessage(String prompt, LLMModel llmModel, List functions, String user, Boolean echo, Integer n, Double temperature, Integer bringFromContext, Integer minResponseTokens) { + public CompletableFuture> promptMessage(String prompt, LLM.Chat llmModel, List functions, String user, Boolean echo, Integer n, Double temperature, Integer bringFromContext, Integer minResponseTokens) { return future(continuation -> scope.promptMessage(prompt, llmModel, functions, user, echo, n, temperature, bringFromContext, minResponseTokens, continuation)); } diff --git a/kotlin/build.gradle.kts b/kotlin/build.gradle.kts index f50c4850f..7d921e473 100644 --- a/kotlin/build.gradle.kts +++ b/kotlin/build.gradle.kts @@ -39,6 +39,7 @@ kotlin { val commonMain by getting { dependencies { api(projects.xefCore) + api(projects.xefOpenai) } } } @@ -86,4 +87,4 @@ tasks { tasks.withType { dependsOn(tasks.withType()) -} \ No newline at end of file +} diff --git a/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIScope.kt b/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIScope.kt index 5a87110a2..150714ad8 100644 --- a/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIScope.kt +++ b/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIScope.kt @@ -1,3 +1,43 @@ package com.xebia.functional.xef.auto +import arrow.core.Either +import arrow.core.left +import arrow.core.right +import com.xebia.functional.xef.AIError +import com.xebia.functional.xef.auto.llm.openai.OpenAIRuntime + typealias AIScope = CoreAIScope + +/** + * Run the [AI] value to produce an [A]. This method initialises all the dependencies required to + * run the [AI] value and once it finishes it closes all the resources. + * + * This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions. + */ +suspend inline fun AI.getOrElse( + runtime: AIRuntime = OpenAIRuntime.defaults(), + crossinline orElse: suspend (AIError) -> A +): A = AIScope(runtime, this) { orElse(it) } + +/** + * Run the [AI] value to produce [A]. This method initialises all the dependencies required to run + * the [AI] value and once it finishes it closes all the resources. + * + * This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions. + * + * @throws AIError in case something went wrong. + * @see getOrElse for an operator that allows directly handling the [AIError] case instead of + * throwing. + */ +suspend inline fun AI.getOrThrow(): A = getOrElse { throw it } + +/** + * Run the [AI] value to produce _either_ an [AIError], or [A]. This method initialises all the + * dependencies required to run the [AI] value and once it finishes it closes all the resources.
+ * + * This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions. + * + * @see getOrElse for an operator that allows directly handling the [AIError] case. + */ +suspend inline fun AI.toEither(): Either = + ai { invoke().right() }.getOrElse { it.left() } diff --git a/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/DeserializerLLMAgent.kt b/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/DeserializerLLMAgent.kt index a1ea38df2..68c10dd99 100644 --- a/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/DeserializerLLMAgent.kt +++ b/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/DeserializerLLMAgent.kt @@ -1,8 +1,8 @@ package com.xebia.functional.xef.auto import com.xebia.functional.xef.auto.serialization.encodeJsonSchema -import com.xebia.functional.xef.llm.openai.CFunction -import com.xebia.functional.xef.llm.openai.LLMModel +import com.xebia.functional.xef.llm.LLM +import com.xebia.functional.xef.llm.models.functions.CFunction import com.xebia.functional.xef.prompt.Prompt import kotlinx.serialization.ExperimentalSerializationApi import kotlinx.serialization.KSerializer @@ -27,7 +27,7 @@ suspend inline fun AIScope.prompt( isLenient = true }, maxDeserializationAttempts: Int = this.maxDeserializationAttempts, - model: LLMModel = this.defaultSerializationModel, + model: LLM.ChatWithFunctions = this.defaultSerializationModel, user: String = this.user, echo: Boolean = this.echo, n: Int = this.numberOfPredictions, @@ -62,7 +62,7 @@ suspend inline fun AIScope.prompt( isLenient = true }, maxDeserializationAttempts: Int = this.maxDeserializationAttempts, - model: LLMModel = this.defaultSerializationModel, + model: LLM.ChatWithFunctions = this.defaultSerializationModel, user: String = this.user, echo: Boolean = this.echo, n: Int = this.numberOfPredictions, @@ -91,8 +91,8 @@ suspend fun AIScope.prompt( isLenient = true }, maxDeserializationAttempts: Int = this.maxDeserializationAttempts, - model: LLMModel = this.defaultSerializationModel, - user: String = "testing", + model: LLM.ChatWithFunctions = this.defaultSerializationModel, + user: String = this.user, echo: Boolean = this.echo, n: Int = this.numberOfPredictions, temperature: Double = this.temperature, diff --git a/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/ImageGenerationAgent.kt b/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/ImageGenerationAgent.kt index 235891fba..ff058b84b 100644 --- a/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/ImageGenerationAgent.kt +++ b/kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/ImageGenerationAgent.kt @@ -1,7 +1,7 @@ package com.xebia.functional.xef.auto import com.xebia.functional.xef.AIError -import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse +import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse import com.xebia.functional.xef.prompt.Prompt /** diff --git a/openai/build.gradle.kts b/openai/build.gradle.kts new file mode 100644 index 000000000..09df18d00 --- /dev/null +++ b/openai/build.gradle.kts @@ -0,0 +1,183 @@ +@file:Suppress("DSL_SCOPE_VIOLATION") + +import org.jetbrains.dokka.gradle.DokkaTask + +repositories { + mavenCentral() +} + +plugins { + base + alias(libs.plugins.kotlin.multiplatform) + alias(libs.plugins.kotest.multiplatform) + alias(libs.plugins.kotlinx.serialization) + alias(libs.plugins.spotless) + alias(libs.plugins.dokka) + alias(libs.plugins.arrow.gradle.publish) + alias(libs.plugins.semver.gradle) +
//id("com.xebia.asfuture").version("0.0.1") +} + +java { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 + toolchain { + languageVersion = JavaLanguageVersion.of(11) + } +} + +kotlin { + jvm { + compilations { + val integrationTest by compilations.creating { + // Create a test task to run the tests produced by this compilation: + tasks.register("integrationTest") { + description = "Run the integration tests" + group = "verification" + classpath = compileDependencyFiles + runtimeDependencyFiles + output.allOutputs + testClassesDirs = output.classesDirs + + testLogging { + events("passed") + } + } + } + val test by compilations.getting + integrationTest.associateWith(test) + } + } + js(IR) { + browser() + nodejs() + } + + linuxX64() + macosX64() + macosArm64() + mingwX64() + + sourceSets { + all { + languageSettings.optIn("kotlin.ExperimentalStdlibApi") + } + + val commonMain by getting { + dependencies { + implementation(projects.xefCore) + implementation(libs.openai.client) + implementation(libs.klogging) + } + } + + val commonTest by getting { + dependencies { + implementation(libs.kotest.property) + implementation(libs.kotest.framework) + implementation(libs.kotest.assertions) + } + } + + val jvmMain by getting { + dependencies { + implementation(libs.logback) + } + } + + val jsMain by getting + + val jvmTest by getting { + dependencies { + implementation(libs.kotest.junit5) + } + } + + val linuxX64Main by getting { + dependencies { + api(libs.ktor.client.cio) + } + } + + val macosX64Main by getting { + dependencies { + api(libs.ktor.client.cio) + } + } + + val macosArm64Main by getting { + dependencies { + api(libs.ktor.client.cio) + } + } + + val mingwX64Main by getting { + dependencies { + api(libs.ktor.client.winhttp) + } + } + + val linuxX64Test by getting + val macosX64Test by getting + val macosArm64Test by getting + val mingwX64Test by getting + + create("nativeMain") { + dependsOn(commonMain) + linuxX64Main.dependsOn(this) + macosX64Main.dependsOn(this) + macosArm64Main.dependsOn(this) + mingwX64Main.dependsOn(this) + } + + create("nativeTest") { + dependsOn(commonTest) + linuxX64Test.dependsOn(this) + macosX64Test.dependsOn(this) + macosArm64Test.dependsOn(this) + mingwX64Test.dependsOn(this) + } + } +} + +spotless { + kotlin { + target("**/*.kt") + ktfmt().googleStyle() + } +} + +tasks { + withType().configureEach { + maxParallelForks = Runtime.getRuntime().availableProcessors() + useJUnitPlatform() + testLogging { + setExceptionFormat("full") + setEvents(listOf("passed", "skipped", "failed", "standardOut", "standardError")) + } + } + + withType().configureEach { + kotlin.sourceSets.forEach { kotlinSourceSet -> + dokkaSourceSets.named(kotlinSourceSet.name) { + perPackageOption { + matchingRegex.set(".*\\.internal.*") + suppress.set(true) + } + skipDeprecated.set(true) + reportUndocumented.set(false) + val baseUrl: String = checkNotNull(project.properties["pom.smc.url"]?.toString()) + + kotlinSourceSet.kotlin.srcDirs.filter { it.exists() }.forEach { srcDir -> + sourceLink { + localDirectory.set(srcDir) + remoteUrl.set(uri("$baseUrl/blob/main/${srcDir.relativeTo(rootProject.rootDir)}").toURL()) + remoteLineSuffix.set("#L") + } + } + } + } + } +} + +tasks.withType { + dependsOn(tasks.withType()) +} diff --git a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/MockAIClient.kt b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/MockAIClient.kt new file mode 100644 index 000000000..f204e20a2 --- 
/dev/null +++ b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/MockAIClient.kt @@ -0,0 +1,124 @@ +package com.xebia.functional.xef.auto.llm.openai + +import arrow.core.Either +import arrow.core.left +import arrow.core.right +import com.xebia.functional.xef.AIError +import com.xebia.functional.xef.auto.AI +import com.xebia.functional.xef.auto.CoreAIScope +import com.xebia.functional.xef.llm.AIClient +import com.xebia.functional.xef.llm.LLMModel +import com.xebia.functional.xef.llm.models.chat.* +import com.xebia.functional.xef.llm.models.embeddings.Embedding +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingResult +import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest +import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse +import com.xebia.functional.xef.llm.models.text.CompletionChoice +import com.xebia.functional.xef.llm.models.text.CompletionRequest +import com.xebia.functional.xef.llm.models.text.CompletionResult +import com.xebia.functional.xef.llm.models.usage.Usage +import com.xebia.functional.xef.vectorstores.LocalVectorStore +import kotlin.time.ExperimentalTime + +class MockOpenAIClient( + private val completion: (CompletionRequest) -> CompletionResult = { + throw NotImplementedError("completion not implemented") + }, + private val chatCompletion: (ChatCompletionRequest) -> ChatCompletionResponse = { + throw NotImplementedError("chat completion not implemented") + }, + private val chatCompletionRequestWithFunctions: + (ChatCompletionRequestWithFunctions) -> ChatCompletionResponseWithFunctions = + { + throw NotImplementedError("chat completion not implemented") + }, + private val embeddings: (EmbeddingRequest) -> EmbeddingResult = ::nullEmbeddings, + private val images: (ImagesGenerationRequest) -> ImagesGenerationResponse = { + throw NotImplementedError("images not implemented") + }, +) : AIClient { + override suspend fun createCompletion(request: CompletionRequest): CompletionResult = + completion(request) + + override suspend fun createChatCompletion( + request: ChatCompletionRequest + ): ChatCompletionResponse = chatCompletion(request) + + override suspend fun createChatCompletionWithFunctions( + request: ChatCompletionRequestWithFunctions + ): ChatCompletionResponseWithFunctions = chatCompletionRequestWithFunctions(request) + + override suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult = + embeddings(request) + + override suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse = + images(request) + + override fun close() {} +} + +fun nullEmbeddings(request: EmbeddingRequest): EmbeddingResult { + val results = request.input.mapIndexed { index, s -> Embedding(s, listOf(0F), index) } + return EmbeddingResult(results, Usage.ZERO) +} + +fun simpleMockAIClient(execute: (String) -> String): MockOpenAIClient = + MockOpenAIClient( + completion = { req -> + val request = "${req.prompt.orEmpty()} ${req.suffix.orEmpty()}" + val response = execute(request) + val result = CompletionChoice(response, 0, null, "end") + val requestTokens = request.split(' ').size + val responseTokens = response.split(' ').size + val usage = Usage(requestTokens, responseTokens, requestTokens + responseTokens) + CompletionResult("FakeID123", "", 0, req.model, listOf(result), usage) + }, + chatCompletion = { req -> + val responses = + req.messages.mapIndexed { ix, msg -> + val response = execute(msg.content ?: "") + 
Choice(Message(msg.role, response), "end", ix) + } + val requestTokens = req.messages.sumOf { it.content?.split(' ')?.size ?: 0 } + val responseTokens = responses.sumOf { it.message?.content?.split(' ')?.size ?: 0 } + val usage = Usage(requestTokens, responseTokens, requestTokens + responseTokens) + ChatCompletionResponse("FakeID123", "", 0, req.model, usage, responses) + } + ) + +@OptIn(ExperimentalTime::class) +suspend fun MockAIScope( + mockClient: MockOpenAIClient, + block: suspend CoreAIScope.() -> A, + orElse: suspend (AIError) -> A +): A = + try { + val embeddings = OpenAIEmbeddings(mockClient) + val vectorStore = LocalVectorStore(embeddings) + val scope = + CoreAIScope( + LLMModel.GPT_3_5_TURBO, + LLMModel.GPT_3_5_TURBO_FUNCTIONS, + mockClient, + vectorStore, + embeddings + ) + block(scope) + } catch (e: AIError) { + orElse(e) + } + +/** + * Run the [AI] value to produce _either_ an [AIError], or [A]. This method uses the [mockAI] to + * compute the different responses. + */ +suspend fun AI.mock(mockAI: MockOpenAIClient): Either = + MockAIScope(mockAI, { invoke().right() }, { it.left() }) + +/** + * Run the [AI] value to produce _either_ an [AIError], or [A]. This method uses the [mockAI] to + * compute the different responses. + */ +suspend fun AI.mock(mockAI: (String) -> String): Either = + MockAIScope(simpleMockAIClient(mockAI), { invoke().right() }, { it.left() }) diff --git a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt new file mode 100644 index 000000000..5810789a1 --- /dev/null +++ b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt @@ -0,0 +1,258 @@ +package com.xebia.functional.xef.auto.llm.openai + +import com.aallam.openai.api.BetaOpenAI +import com.aallam.openai.api.chat.* +import com.aallam.openai.api.chat.ChatCompletionRequest as OpenAIChatCompletionRequest +import com.aallam.openai.api.completion.Choice as OpenAIChoice +import com.aallam.openai.api.completion.CompletionRequest as OpenAICompletionRequest +import com.aallam.openai.api.completion.TextCompletion +import com.aallam.openai.api.completion.completionRequest +import com.aallam.openai.api.core.Usage as OpenAIUsage +import com.aallam.openai.api.embedding.EmbeddingRequest as OpenAIEmbeddingRequest +import com.aallam.openai.api.embedding.EmbeddingResponse +import com.aallam.openai.api.embedding.embeddingRequest +import com.aallam.openai.api.image.ImageCreation +import com.aallam.openai.api.image.ImageSize +import com.aallam.openai.api.image.ImageURL +import com.aallam.openai.api.image.imageCreation +import com.aallam.openai.api.model.ModelId +import com.aallam.openai.client.OpenAI +import com.xebia.functional.xef.llm.AIClient +import com.xebia.functional.xef.llm.models.chat.* +import com.xebia.functional.xef.llm.models.embeddings.Embedding +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingResult +import com.xebia.functional.xef.llm.models.functions.FunctionCall as FnCall +import com.xebia.functional.xef.llm.models.images.ImageGenerationUrl +import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest +import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse +import com.xebia.functional.xef.llm.models.text.CompletionChoice +import com.xebia.functional.xef.llm.models.text.CompletionRequest +import 
com.xebia.functional.xef.llm.models.text.CompletionResult +import com.xebia.functional.xef.llm.models.usage.Usage +import kotlinx.serialization.json.Json + +class OpenAIClient(val openAI: OpenAI) : AIClient, AutoCloseable { + + override suspend fun createCompletion(request: CompletionRequest): CompletionResult { + val response = openAI.completion(toCompletionRequest(request)) + return completionResult(response) + } + + @OptIn(BetaOpenAI::class) + override suspend fun createChatCompletion( + request: ChatCompletionRequest + ): ChatCompletionResponse { + val response = openAI.chatCompletion(toChatCompletionRequest(request)) + return chatCompletionResult(response) + } + + @OptIn(BetaOpenAI::class) + override suspend fun createChatCompletionWithFunctions( + request: ChatCompletionRequestWithFunctions + ): ChatCompletionResponseWithFunctions { + val response = openAI.chatCompletion(toChatCompletionRequestWithFunctions(request)) + return chatCompletionResultWithFunctions(response) + } + + override suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult { + val response = openAI.embeddings(toEmbeddingRequest(request)) + return embeddingResult(response) + } + + @OptIn(BetaOpenAI::class) + override suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse { + val response = openAI.imageURL(toImageCreationRequest(request)) + return imageResult(response) + } + + private fun toCompletionRequest(request: CompletionRequest): OpenAICompletionRequest = + completionRequest { + model = ModelId(request.model) + user = request.user + prompt = request.prompt + suffix = request.suffix + maxTokens = request.maxTokens + temperature = request.temperature + topP = request.topP + n = request.n + logprobs = request.logprobs + echo = request.echo + stop = request.stop + presencePenalty = request.presencePenalty + frequencyPenalty = request.frequencyPenalty + bestOf = request.bestOf + logitBias = request.logitBias + } + + private fun completionResult(response: TextCompletion): CompletionResult = + CompletionResult( + id = response.id, + `object` = response.model.id, + created = response.created, + model = response.model.id, + choices = response.choices.map { completionChoice(it) }, + usage = usage(response.usage) + ) + + private fun completionChoice(it: OpenAIChoice): CompletionChoice = + CompletionChoice( + it.text, + it.index, + null, + it.finishReason, + ) + + private fun usage(usage: OpenAIUsage?): Usage = + Usage( + promptTokens = usage?.promptTokens, + completionTokens = usage?.completionTokens, + totalTokens = usage?.totalTokens, + ) + + @OptIn(BetaOpenAI::class) + private fun chatCompletionResult(response: ChatCompletion): ChatCompletionResponse = + ChatCompletionResponse( + id = response.id, + `object` = response.model.id, + created = response.created, + model = response.model.id, + choices = response.choices.map { chatCompletionChoice(it) }, + usage = usage(response.usage) + ) + + @OptIn(BetaOpenAI::class) + private fun chatCompletionResultWithFunctions( + response: ChatCompletion + ): ChatCompletionResponseWithFunctions = + ChatCompletionResponseWithFunctions( + id = response.id, + `object` = response.model.id, + created = response.created, + model = response.model.id, + choices = response.choices.map { chatCompletionChoiceWithFunctions(it) }, + usage = usage(response.usage) + ) + + @OptIn(BetaOpenAI::class) + private fun chatCompletionChoiceWithFunctions(choice: ChatChoice): ChoiceWithFunctions = + ChoiceWithFunctions( + message = + choice.message?.let { + 
MessageWithFunctionCall( + role = it.role.role, + content = it.content, + name = it.name, + functionCall = it.functionCall?.let { FnCall(it.name, it.arguments) } + ) + }, + finishReason = choice.finishReason, + index = choice.index, + ) + + @OptIn(BetaOpenAI::class) + private fun chatCompletionChoice(choice: ChatChoice): Choice = + Choice( + message = + choice.message?.let { + Message( + role = it.role.role, + content = it.content, + name = it.name, + ) + }, + finishReason = choice.finishReason, + index = choice.index, + ) + + @OptIn(BetaOpenAI::class) + private fun toChatCompletionRequest(request: ChatCompletionRequest): OpenAIChatCompletionRequest = + chatCompletionRequest { + model = ModelId(request.model) + messages = + request.messages.map { + ChatMessage( + role = ChatRole(it.role), + content = it.content, + name = it.name, + ) + } + temperature = request.temperature + topP = request.topP + n = request.n + stop = request.stop + maxTokens = request.maxTokens + presencePenalty = request.presencePenalty + frequencyPenalty = request.frequencyPenalty + logitBias = request.logitBias + user = request.user + } + + @OptIn(BetaOpenAI::class) + private fun toChatCompletionRequestWithFunctions( + request: ChatCompletionRequestWithFunctions + ): OpenAIChatCompletionRequest = chatCompletionRequest { + model = ModelId(request.model) + messages = + request.messages.map { + ChatMessage(role = ChatRole(it.role), content = it.content, name = it.name) + } + + functions = + request.functions.map { + val schema = Json.parseToJsonElement(it.parameters) + ChatCompletionFunction( + name = it.name, + description = it.description, + parameters = Parameters(schema), + ) + } + temperature = request.temperature + topP = request.topP + n = request.n + stop = request.stop + maxTokens = request.maxTokens + presencePenalty = request.presencePenalty + frequencyPenalty = request.frequencyPenalty + logitBias = request.logitBias + user = request.user + functionCall = request.functionCall["name"]?.let { FunctionMode.Named(it) } ?: FunctionMode.Auto + } + + private fun embeddingResult(response: EmbeddingResponse): EmbeddingResult = + EmbeddingResult( + data = + response.embeddings.map { + Embedding( + `object` = "embedding", + embedding = it.embedding.map { it.toFloat() }, + index = it.index + ) + }, + usage = usage(response.usage) + ) + + private fun toEmbeddingRequest(request: EmbeddingRequest): OpenAIEmbeddingRequest = + embeddingRequest { + model = ModelId(request.model) + input = request.input + user = request.user + } + + @OptIn(BetaOpenAI::class) + private fun imageResult(response: List): ImagesGenerationResponse = + ImagesGenerationResponse(data = response.map { ImageGenerationUrl(it.url) }) + + @OptIn(BetaOpenAI::class) + private fun toImageCreationRequest(request: ImagesGenerationRequest): ImageCreation = + imageCreation { + prompt = request.prompt + n = request.numberImages + size = ImageSize(request.size) + user = request.user + } + + override fun close() { + openAI.close() + } +} diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/OpenAIEmbeddings.kt b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIEmbeddings.kt similarity index 61% rename from core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/OpenAIEmbeddings.kt rename to openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIEmbeddings.kt index 4ae60d722..ca24ab607 100644 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/embeddings/OpenAIEmbeddings.kt +++ 
b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIEmbeddings.kt @@ -1,15 +1,15 @@ -package com.xebia.functional.xef.embeddings +package com.xebia.functional.xef.auto.llm.openai import arrow.fx.coroutines.parMap -import com.xebia.functional.xef.env.OpenAIConfig -import com.xebia.functional.xef.llm.openai.AIClient -import com.xebia.functional.xef.llm.openai.EmbeddingRequest -import com.xebia.functional.xef.llm.openai.RequestConfig +import com.xebia.functional.xef.embeddings.Embedding +import com.xebia.functional.xef.embeddings.Embeddings +import com.xebia.functional.xef.llm.AIClient +import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest +import com.xebia.functional.xef.llm.models.embeddings.RequestConfig import kotlin.time.ExperimentalTime @ExperimentalTime -class OpenAIEmbeddings(private val config: OpenAIConfig, private val oaiClient: AIClient) : - Embeddings { +class OpenAIEmbeddings(private val oaiClient: AIClient) : Embeddings { override suspend fun embedDocuments( texts: List, @@ -22,7 +22,7 @@ class OpenAIEmbeddings(private val config: OpenAIConfig, private val oaiClient: } val lists: List> = if (texts.isEmpty()) emptyList() - else texts.chunked(chunkSize ?: config.chunkSize).parMap { createEmbeddings(it) } + else texts.chunked(chunkSize ?: 400).parMap { createEmbeddings(it) } return lists.flatten() } diff --git a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIRuntime.kt b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIRuntime.kt new file mode 100644 index 000000000..a02c9e069 --- /dev/null +++ b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIRuntime.kt @@ -0,0 +1,47 @@ +package com.xebia.functional.xef.auto.llm.openai + +import com.aallam.openai.api.logging.LogLevel +import com.aallam.openai.api.logging.Logger +import com.aallam.openai.client.LoggingConfig +import com.aallam.openai.client.OpenAI +import com.aallam.openai.client.OpenAIConfig +import com.xebia.functional.xef.auto.AIRuntime +import com.xebia.functional.xef.auto.CoreAIScope +import com.xebia.functional.xef.env.getenv +import com.xebia.functional.xef.llm.LLMModel +import com.xebia.functional.xef.vectorstores.LocalVectorStore +import kotlin.jvm.JvmStatic +import kotlin.time.ExperimentalTime + +object OpenAIRuntime { + @JvmStatic fun defaults(): AIRuntime = openAI(null) + + @OptIn(ExperimentalTime::class) + @JvmStatic + fun openAI(config: OpenAIConfig? = null): AIRuntime { + val openAIConfig = + config + ?: OpenAIConfig( + logging = LoggingConfig(logLevel = LogLevel.None, logger = Logger.Empty), + token = + requireNotNull(getenv("OPENAI_TOKEN")) { "OpenAI Token missing from environment." 
}, + ) + val openAI = OpenAI(openAIConfig) + val client = OpenAIClient(openAI) + val embeddings = OpenAIEmbeddings(client) + return AIRuntime(client, embeddings) { block -> + client.use { openAiClient -> + val vectorStore = LocalVectorStore(embeddings) + val scope = + CoreAIScope( + defaultModel = LLMModel.GPT_3_5_TURBO_16K, + defaultSerializationModel = LLMModel.GPT_3_5_TURBO_FUNCTIONS, + aiClient = openAiClient, + context = vectorStore, + embeddings = embeddings + ) + block(scope) + } + } + } +} diff --git a/scala/src/main/scala/com/xebia/functional/xef/scala/auto/package.scala b/scala/src/main/scala/com/xebia/functional/xef/scala/auto/package.scala index 44920312f..3a357fb13 100644 --- a/scala/src/main/scala/com/xebia/functional/xef/scala/auto/package.scala +++ b/scala/src/main/scala/com/xebia/functional/xef/scala/auto/package.scala @@ -2,18 +2,21 @@ package com.xebia.functional.xef.scala.auto import com.xebia.functional.loom.LoomAdapter import com.xebia.functional.xef.AIError -import com.xebia.functional.xef.llm.openai.LLMModel -import com.xebia.functional.xef.llm.openai.CFunction +import com.xebia.functional.xef.llm.LLM +import com.xebia.functional.xef.llm.LLMModel +import com.xebia.functional.xef.llm.models.functions.CFunction import io.circe.Decoder import io.circe.parser.parse import com.xebia.functional.xef.auto.AIKt import com.xebia.functional.xef.auto.AIRuntime +import com.xebia.functional.xef.auto.llm.openai.OpenAIRuntime import com.xebia.functional.xef.auto.serialization.JsonSchemaKt import com.xebia.functional.xef.pdf.PDFLoaderKt import com.xebia.functional.tokenizer.ModelType -import com.xebia.functional.xef.llm.openai._ +import com.xebia.functional.xef.llm._ +import com.xebia.functional.xef.auto.llm.openai._ import com.xebia.functional.xef.scala.textsplitters.TextSplitter -import com.xebia.functional.xef.llm.openai.images.* +import com.xebia.functional.xef.llm.models.images.* import java.io.File import scala.jdk.CollectionConverters.* @@ -24,7 +27,7 @@ type AI[A] = AIScope ?=> A def ai[A](block: AI[A]): A = LoomAdapter.apply { cont => AIKt.AIScope[A]( - AIRuntime.openAI, + OpenAIRuntime.defaults[A](), { (coreAIScope, _) => given AIScope = AIScope.fromCore(coreAIScope) @@ -43,7 +46,7 @@ extension [A](block: AI[A]) { def prompt[A: Decoder: SerialDescriptor]( prompt: String, maxAttempts: Int = 5, - llmModel: LLMModel = LLMModel.getGPT_3_5_TURBO_FUNCTIONS, + llmModel: LLM.ChatWithFunctions = LLMModel.getGPT_3_5_TURBO_FUNCTIONS, user: String = "testing", echo: Boolean = false, n: Int = 1, @@ -71,7 +74,9 @@ def prompt[A: Decoder: SerialDescriptor]( private def generateCFunctions[A: SerialDescriptor]: List[CFunction] = val descriptor = SerialDescriptor[A].serialDescriptor val serialName = descriptor.getSerialName - val fnName = serialName.substring(serialName.lastIndexOf("."), serialName.length) + val fnName = + if (serialName.contains(".")) serialName.substring(serialName.lastIndexOf("."), serialName.length) + else serialName List(CFunction(fnName, "Generated function for $fnName", JsonSchemaKt.encodeJsonSchema(descriptor))) def contextScope[A: Decoder: SerialDescriptor](docs: List[String])(block: AI[A])(using scope: AIScope): A = @@ -79,7 +84,7 @@ def contextScope[A: Decoder: SerialDescriptor](docs: List[String])(block: AI[A]) def promptMessage( prompt: String, - llmModel: LLMModel = LLMModel.getGPT_3_5_TURBO, + llmModel: LLM.Chat = LLMModel.getGPT_3_5_TURBO, functions: List[CFunction] = List.empty, user: String = "testing", echo: Boolean = false, diff --git 
a/settings.gradle.kts b/settings.gradle.kts index 720b5cce1..5aee72195 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -27,6 +27,9 @@ project(":xef-filesystem").projectDir = file("filesystem") include("xef-tokenizer") project(":xef-tokenizer").projectDir = file("tokenizer") +include("xef-openai") +project(":xef-openai").projectDir = file("openai") + include("xef-gpt4all") project(":xef-gpt4all").projectDir = file("gpt4all-kotlin")
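The new terminal operators in kotlin/src/commonMain/kotlin/com/xebia/functional/xef/auto/AIScope.kt make OpenAIRuntime.defaults() the default way to run an AI program. A minimal sketch of the intended call site, assuming promptMessage has a single-argument overload on CoreAIScope returning the model replies as List<String> and that OPENAI_TOKEN is set in the environment; the prompt text is illustrative only:

import com.xebia.functional.xef.auto.ai
import com.xebia.functional.xef.auto.getOrElse

suspend fun main() {
  // getOrElse() falls back to OpenAIRuntime.defaults(), which reads OPENAI_TOKEN.
  val replies: List<String> =
    ai { promptMessage("Write a haiku about Kotlin") }
      .getOrElse { error -> listOf("failed: $error") }
  println(replies)
}

getOrThrow() and toEither() route through the same getOrElse machinery, so all three operators acquire the OpenAI client and a fresh LocalVectorStore per run and release them when the chain completes.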
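Because the commit types operations by model capability, promptMessage now takes an LLM.Chat while the serialization-backed prompt takes an LLM.ChatWithFunctions. A sketch of overriding the default chat model, assuming the Kotlin overload keeps the llmModel parameter name used by the Java and Scala facades:

import com.xebia.functional.xef.auto.ai
import com.xebia.functional.xef.auto.getOrElse
import com.xebia.functional.xef.llm.LLMModel

suspend fun main() {
  val replies = ai {
    // GPT_3_5_TURBO_16K is typed as a chat model; passing a completion-only
    // model such as TEXT_DAVINCI_003 here would now fail to compile.
    promptMessage("Summarise the new xef-openai module", llmModel = LLMModel.GPT_3_5_TURBO_16K)
  }.getOrElse { emptyList<String>() }
  println(replies)
}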
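OpenAIRuntime.openAI also accepts an explicit OpenAIConfig from the aallam openai-client dependency instead of relying on the OPENAI_TOKEN fallback. A sketch with a placeholder token ("sk-..." is not a real key):

import com.aallam.openai.client.OpenAIConfig
import com.xebia.functional.xef.auto.ai
import com.xebia.functional.xef.auto.getOrElse
import com.xebia.functional.xef.auto.llm.openai.OpenAIRuntime

suspend fun main() {
  val replies = ai { promptMessage("hello") }
    // Explicit config: no env var lookup; logging comes from the client library defaults.
    .getOrElse(OpenAIRuntime.openAI(OpenAIConfig(token = "sk-..."))) { emptyList() }
  println(replies)
}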
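The MockAIClient file moves into the new module and gains AI.mock entry points, which keep programs off the network in tests. A sketch of the (String) -> String overload, which wraps simpleMockAIClient so every chat message is answered by the lambda:

import arrow.core.Either
import com.xebia.functional.xef.AIError
import com.xebia.functional.xef.auto.ai
import com.xebia.functional.xef.auto.llm.openai.mock

suspend fun main() {
  val result: Either<AIError, List<String>> =
    ai { promptMessage("ping") }.mock { request -> "pong: $request" }
  println(result) // Right([pong: ...]) unless the program raises an AIError
}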
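Embeddings now flow through the generic AIClient, so a vector store can be wired from any client, including the mock. A sketch, assuming OpenAIEmbeddings keeps the ExperimentalTime opt-in from the moved file; nullEmbeddings gives each input a single 0F component, which is enough to exercise LocalVectorStore in tests:

import com.xebia.functional.xef.auto.llm.openai.MockOpenAIClient
import com.xebia.functional.xef.auto.llm.openai.OpenAIEmbeddings
import com.xebia.functional.xef.vectorstores.LocalVectorStore
import kotlin.time.ExperimentalTime

@OptIn(ExperimentalTime::class)
suspend fun main() {
  // MockOpenAIClient defaults its embeddings hook to ::nullEmbeddings.
  val embeddings = OpenAIEmbeddings(MockOpenAIClient())
  val store = LocalVectorStore(embeddings)
  store.addTexts(listOf("xef wires LLM calls into Kotlin programs"))
}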