diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt
index 6aa48c7a9..2df27608e 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt
@@ -51,32 +51,28 @@ interface Chat : LLM {
       return totalLeftTokens
     }
 
-    fun buildChatRequest(): ChatCompletionRequest {
-      val messages: List<Message> = listOf(Message(Role.USER, promptWithContext, Role.USER.name))
-      return ChatCompletionRequest(
+    val userMessage = Message(Role.USER, promptWithContext, Role.USER.name)
+    fun buildChatRequest(): ChatCompletionRequest =
+      ChatCompletionRequest(
         model = name,
         user = promptConfiguration.user,
-        messages = messages,
+        messages = listOf(userMessage),
         n = promptConfiguration.numberOfPredictions,
         temperature = promptConfiguration.temperature,
-        maxTokens = checkTotalLeftChatTokens(messages)
+        maxTokens = checkTotalLeftChatTokens(listOf(userMessage))
       )
-    }
 
-    fun chatWithFunctionsRequest(): ChatCompletionRequestWithFunctions {
-      val firstFnName: String? = functions.firstOrNull()?.name
-      val messages: List<Message> = listOf(Message(Role.USER, promptWithContext, Role.USER.name))
-      return ChatCompletionRequestWithFunctions(
+    fun chatWithFunctionsRequest(): ChatCompletionRequestWithFunctions =
+      ChatCompletionRequestWithFunctions(
         model = name,
         user = promptConfiguration.user,
-        messages = messages,
+        messages = listOf(userMessage),
         n = promptConfiguration.numberOfPredictions,
         temperature = promptConfiguration.temperature,
-        maxTokens = checkTotalLeftChatTokens(messages),
+        maxTokens = checkTotalLeftChatTokens(listOf(userMessage)),
         functions = functions,
-        functionCall = mapOf("name" to (firstFnName ?: ""))
+        functionCall = mapOf("name" to (functions.firstOrNull()?.name ?: ""))
       )
-    }
 
     return when (this) {
      is ChatWithFunctions ->
diff --git a/gpt4all-kotlin/src/jvmMain/kotlin/com/xebia/functional/gpt4all/HuggingFaceLocalEmbeddings.kt b/gpt4all-kotlin/src/jvmMain/kotlin/com/xebia/functional/gpt4all/HuggingFaceLocalEmbeddings.kt
index e9e165d59..ac4d1b0d3 100644
--- a/gpt4all-kotlin/src/jvmMain/kotlin/com/xebia/functional/gpt4all/HuggingFaceLocalEmbeddings.kt
+++ b/gpt4all-kotlin/src/jvmMain/kotlin/com/xebia/functional/gpt4all/HuggingFaceLocalEmbeddings.kt
@@ -1,6 +1,7 @@
 package com.xebia.functional.gpt4all
 
 import ai.djl.huggingface.tokenizers.HuggingFaceTokenizer
+import com.xebia.functional.xef.embeddings.Embedding as XefEmbedding
 import com.xebia.functional.xef.embeddings.Embeddings
 import com.xebia.functional.xef.llm.models.embeddings.Embedding
 import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest
@@ -15,7 +16,7 @@ class HuggingFaceLocalEmbeddings(name: String, artifact: String) : com.xebia.fun
   override val name: String = HuggingFaceLocalEmbeddings::class.java.canonicalName
 
   override suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult {
-    val embedings = tokenizer.batchEncode(request.input)
+    val embeddings = tokenizer.batchEncode(request.input)
     return EmbeddingResult(
-      data = embedings.mapIndexed { n, em -> Embedding("embedding", em.ids.map { it.toFloat() }, n) },
+      data = embeddings.mapIndexed { n, em -> Embedding("embedding", em.ids.map { it.toFloat() }, n) },
       usage = Usage.ZERO
@@ -26,19 +27,12 @@ class HuggingFaceLocalEmbeddings(name: String, artifact: String) : com.xebia.fun
     texts: List<String>,
     chunkSize: Int?,
     requestConfig: RequestConfig
-  ): List<com.xebia.functional.xef.embeddings.Embedding> {
-    val encodings = tokenizer.batchEncode(texts)
-    return encodings.mapIndexed { n, em ->
-      com.xebia.functional.xef.embeddings.Embedding(
-        em.ids.map { it.toFloat() },
-      )
+  ): List<XefEmbedding> =
+    tokenizer.batchEncode(texts).mapIndexed { n, em ->
+      XefEmbedding(em.ids.map { it.toFloat() })
     }
-  }
 
-  override suspend fun embedQuery(
-    text: String,
-    requestConfig: RequestConfig
-  ): List<com.xebia.functional.xef.embeddings.Embedding> =
+  override suspend fun embedQuery(text: String, requestConfig: RequestConfig): List<XefEmbedding> =
     embedDocuments(listOf(text), null, requestConfig)
 
   companion object {
diff --git a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/DeserializerLLMAgent.kt b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/DeserializerLLMAgent.kt
index 156d48cc3..f2c8275f8 100644
--- a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/DeserializerLLMAgent.kt
+++ b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/DeserializerLLMAgent.kt
@@ -61,16 +61,14 @@ suspend fun <A> CoreAIScope.prompt(
     isLenient = true
   },
   promptConfiguration: PromptConfiguration = PromptConfiguration.DEFAULTS,
-): A {
-  val functions = generateCFunction(serializer.descriptor)
-  return model.prompt(
+): A =
+  model.prompt(
     prompt,
     context,
-    functions,
+    generateCFunction(serializer.descriptor),
     { json.decodeFromString(serializer, it) },
     promptConfiguration
   )
-}
 
 @OptIn(ExperimentalSerializationApi::class)
 private fun generateCFunction(descriptor: SerialDescriptor): List<CFunction> {
diff --git a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt
index 9f8f68cb3..7b9022089 100644
--- a/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt
+++ b/openai/src/commonMain/kotlin/com/xebia/functional/xef/auto/llm/openai/OpenAIClient.kt
@@ -8,7 +8,6 @@ import com.aallam.openai.api.completion.CompletionRequest as OpenAICompletionReq
 import com.aallam.openai.api.completion.TextCompletion
 import com.aallam.openai.api.completion.completionRequest
 import com.aallam.openai.api.core.Usage as OpenAIUsage
-import com.aallam.openai.api.embedding.EmbeddingRequest as OpenAIEmbeddingRequest
 import com.aallam.openai.api.embedding.EmbeddingResponse
 import com.aallam.openai.api.embedding.embeddingRequest
 import com.aallam.openai.api.image.ImageCreation
@@ -52,7 +51,14 @@ class OpenAIModel(
     request: ChatCompletionRequest
   ): ChatCompletionResponse {
     val response = client.chatCompletion(toChatCompletionRequest(request))
-    return chatCompletionResult(response)
+    return ChatCompletionResponse(
+      id = response.id,
+      `object` = response.model.id,
+      created = response.created,
+      model = response.model.id,
+      choices = response.choices.map { chatCompletionChoice(it) },
+      usage = usage(response.usage)
+    )
   }
 
   @OptIn(BetaOpenAI::class)
@@ -60,19 +66,45 @@ class OpenAIModel(
     request: ChatCompletionRequestWithFunctions
   ): ChatCompletionResponseWithFunctions {
     val response = client.chatCompletion(toChatCompletionRequestWithFunctions(request))
-    return chatCompletionResultWithFunctions(response)
+
+    fun chatCompletionChoiceWithFunctions(choice: ChatChoice): ChoiceWithFunctions =
+      ChoiceWithFunctions(
+        message =
+          choice.message?.let {
+            MessageWithFunctionCall(
+              role = it.role.role,
+              content = it.content,
+              name = it.name,
+              functionCall = it.functionCall?.let { FnCall(it.name, it.arguments) }
+            )
+          },
+        finishReason = choice.finishReason,
+        index = choice.index,
+      )
+
+    return ChatCompletionResponseWithFunctions(
+      id = response.id,
+      `object` = response.model.id,
+      created = response.created,
+      model = response.model.id,
+      choices = response.choices.map { chatCompletionChoiceWithFunctions(it) },
+      usage = usage(response.usage)
+    )
   }
 
   override suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult {
-    val response = client.embeddings(toEmbeddingRequest(request))
-    return embeddingResult(response)
+    val openAIRequest = embeddingRequest {
+      model = ModelId(request.model)
+      input = request.input
+      user = request.user
+    }
+
+    return embeddingResult(client.embeddings(openAIRequest))
   }
 
   @OptIn(BetaOpenAI::class)
-  override suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse {
-    val response = client.imageURL(toImageCreationRequest(request))
-    return imageResult(response)
-  }
+  override suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse =
+    imageResult(client.imageURL(toImageCreationRequest(request)))
 
   private fun toCompletionRequest(request: CompletionRequest): OpenAICompletionRequest =
     completionRequest {
@@ -118,46 +150,6 @@ class OpenAIModel(
       totalTokens = usage?.totalTokens,
     )
 
-  @OptIn(BetaOpenAI::class)
-  private fun chatCompletionResult(response: ChatCompletion): ChatCompletionResponse =
-    ChatCompletionResponse(
-      id = response.id,
-      `object` = response.model.id,
-      created = response.created,
-      model = response.model.id,
-      choices = response.choices.map { chatCompletionChoice(it) },
-      usage = usage(response.usage)
-    )
-
-  @OptIn(BetaOpenAI::class)
-  private fun chatCompletionResultWithFunctions(
-    response: ChatCompletion
-  ): ChatCompletionResponseWithFunctions =
-    ChatCompletionResponseWithFunctions(
-      id = response.id,
-      `object` = response.model.id,
-      created = response.created,
-      model = response.model.id,
-      choices = response.choices.map { chatCompletionChoiceWithFunctions(it) },
-      usage = usage(response.usage)
-    )
-
-  @OptIn(BetaOpenAI::class)
-  private fun chatCompletionChoiceWithFunctions(choice: ChatChoice): ChoiceWithFunctions =
-    ChoiceWithFunctions(
-      message =
-        choice.message?.let {
-          MessageWithFunctionCall(
-            role = it.role.role,
-            content = it.content,
-            name = it.name,
-            functionCall = it.functionCall?.let { FnCall(it.name, it.arguments) }
-          )
-        },
-      finishReason = choice.finishReason,
-      index = choice.index,
-    )
-
   @OptIn(BetaOpenAI::class)
   private fun chatCompletionChoice(choice: ChatChoice): Choice =
     Choice(
@@ -258,13 +250,6 @@ class OpenAIModel(
       usage = usage(response.usage)
     )
 
-  private fun toEmbeddingRequest(request: EmbeddingRequest): OpenAIEmbeddingRequest =
-    embeddingRequest {
-      model = ModelId(request.model)
-      input = request.input
-      user = request.user
-    }
-
   @OptIn(BetaOpenAI::class)
   private fun imageResult(response: List<ImageURL>): ImagesGenerationResponse =
     ImagesGenerationResponse(data = response.map { ImageGenerationUrl(it.url) })
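
Note on the `Chat.kt` hunk above: the refactor hoists a single `userMessage` out of the two request builders, so `buildChatRequest` and `chatWithFunctionsRequest` both close over one shared value instead of each rebuilding an identical `messages` list. Below is a minimal, self-contained sketch of that closure-hoisting pattern; the `Message`/`Request` types and the token-budget lambda are hypothetical stand-ins, not xef's real API.

```kotlin
// Toy model of the hoisting refactor: one shared value, two local builders.
data class Message(val role: String, val content: String)
data class Request(val messages: List<Message>, val maxTokens: Int)

fun buildRequests(prompt: String, budget: (List<Message>) -> Int): Pair<Request, Request> {
  val userMessage = Message("user", prompt) // built once, closed over below

  fun plain(): Request =
    Request(listOf(userMessage), budget(listOf(userMessage)))

  // Stand-in for the with-functions variant; it reuses the same hoisted message.
  fun withFunctions(): Request =
    Request(listOf(userMessage), budget(listOf(userMessage)))

  return plain() to withFunctions()
}

fun main() {
  val (plain, withFns) = buildRequests("hello") { msgs -> 4096 - msgs.sumOf { it.content.length } }
  println(plain.maxTokens == withFns.maxTokens) // true: both builders saw the same message
}
```

Both builders pass the same hoisted list to the budget check, mirroring `checkTotalLeftChatTokens(listOf(userMessage))` in the diff, so the two request paths can no longer drift apart on the message whose tokens they count.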