Commit

Merge branch 'main' into build-platform-matrix

raulraja authored Jun 30, 2023
2 parents 3e0d444 + c0af078 commit b68280f

Showing 68 changed files with 1,092 additions and 830 deletions.
2 changes: 0 additions & 2 deletions core/build.gradle.kts
@@ -68,8 +68,6 @@ kotlin {
api(libs.bundles.ktor.client)
api(projects.xefTokenizer)

// implementation(libs.arrow.fx.stm)

implementation(libs.uuid)
implementation(libs.klogging)
}
@@ -1,7 +1,7 @@
package com.xebia.functional.xef

import arrow.core.NonEmptyList
import com.xebia.functional.xef.llm.openai.Message
import com.xebia.functional.xef.llm.models.chat.Message
import kotlin.jvm.JvmOverloads

sealed class AIError @JvmOverloads constructor(message: String, cause: Throwable? = null) :
82 changes: 1 addition & 81 deletions core/src/commonMain/kotlin/com/xebia/functional/xef/auto/AI.kt
@@ -1,18 +1,8 @@
package com.xebia.functional.xef.auto

import arrow.core.Either
import arrow.core.left
import arrow.core.right
import com.xebia.functional.xef.AIError
import com.xebia.functional.xef.embeddings.OpenAIEmbeddings
import com.xebia.functional.xef.env.OpenAIConfig
import com.xebia.functional.xef.llm.openai.AIClient
import com.xebia.functional.xef.llm.openai.LLMModel
import com.xebia.functional.xef.llm.openai.MockOpenAIClient
import com.xebia.functional.xef.llm.openai.simpleMockAIClient
import com.xebia.functional.xef.vectorstores.LocalVectorStore
import com.xebia.functional.xef.llm.AIClient
import com.xebia.functional.xef.vectorstores.VectorStore
import kotlin.time.ExperimentalTime

@DslMarker annotation class AiDsl

@@ -29,79 +19,9 @@ typealias AI<A> = suspend CoreAIScope.() -> A
/** A DSL block that makes it more convenient to construct [AI] values. */
inline fun <A> ai(noinline block: suspend CoreAIScope.() -> A): AI<A> = block

/**
* Run the [AI] value to produce an [A], this method initialises all the dependencies required to
* run the [AI] value and once it finishes it closes all the resources.
*
* This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions.
*/
suspend inline fun <A> AI<A>.getOrElse(
runtime: AIRuntime<A> = AIRuntime.openAI(),
crossinline orElse: suspend (AIError) -> A
): A = AIScope(runtime, this) { orElse(it) }

suspend fun <A> AIScope(runtime: AIRuntime<A>, block: AI<A>, orElse: suspend (AIError) -> A): A =
try {
runtime.runtime(block)
} catch (e: AIError) {
orElse(e)
}

@OptIn(ExperimentalTime::class)
suspend fun <A> MockAIScope(
mockClient: MockOpenAIClient,
block: suspend CoreAIScope.() -> A,
orElse: suspend (AIError) -> A
): A =
try {
val embeddings = OpenAIEmbeddings(OpenAIConfig(), mockClient)
val vectorStore = LocalVectorStore(embeddings)
val scope =
CoreAIScope(
LLMModel.GPT_3_5_TURBO,
LLMModel.GPT_3_5_TURBO_FUNCTIONS,
mockClient,
vectorStore,
embeddings
)
block(scope)
} catch (e: AIError) {
orElse(e)
}

/**
* Run the [AI] value to produce _either_ an [AIError], or [A]. this method initialises all the
* dependencies required to run the [AI] value and once it finishes it closes all the resources.
*
* This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions.
*
* @see getOrElse for an operator that allow directly handling the [AIError] case.
*/
suspend inline fun <reified A> AI<A>.toEither(): Either<AIError, A> =
ai { invoke().right() }.getOrElse { it.left() }

/**
* Run the [AI] value to produce _either_ an [AIError], or [A]. This method uses the [mockAI] to
* compute the different responses.
*/
suspend fun <A> AI<A>.mock(mockAI: MockOpenAIClient): Either<AIError, A> =
MockAIScope(mockAI, { invoke().right() }, { it.left() })

/**
* Run the [AI] value to produce _either_ an [AIError], or [A]. This method uses the [mockAI] to
* compute the different responses.
*/
suspend fun <A> AI<A>.mock(mockAI: (String) -> String): Either<AIError, A> =
MockAIScope(simpleMockAIClient(mockAI), { invoke().right() }, { it.left() })

/**
* Run the [AI] value to produce [A]. this method initialises all the dependencies required to run
* the [AI] value and once it finishes it closes all the resources.
*
* This operator is **terminal** meaning it runs and completes the _chain_ of `AI` actions.
*
* @throws AIError in case something went wrong.
* @see getOrElse for an operator that allow directly handling the [AIError] case instead of
* throwing.
*/
suspend inline fun <reified A> AI<A>.getOrThrow(): A = getOrElse { throw it }
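
The runners shown above (getOrElse, AIScope, MockAIScope, toEither, mock, getOrThrow) are removed from this file by the commit, leaving the AI<A> typealias and the ai { } builder as the DSL surface here. A minimal usage sketch of what remains — assuming, as in the replaced code, that CoreAIScope's promptMessage can be called with just a question, and that some runner (for example one built from the refactored AIRuntime below) executes the block — could look like this:

package com.xebia.functional.xef.auto.example // hypothetical example file

import com.xebia.functional.xef.auto.ai

// Sketch only: this builds an AI program without running it. promptMessage is a
// member of CoreAIScope (the block's receiver); its return type is declared
// there, so inference is left to pick it up.
val summarise = ai {
  promptMessage("Summarise the xef library in one sentence")
}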
@@ -1,32 +1,12 @@
package com.xebia.functional.xef.auto

import com.xebia.functional.xef.embeddings.OpenAIEmbeddings
import com.xebia.functional.xef.env.OpenAIConfig
import com.xebia.functional.xef.llm.openai.KtorOpenAIClient
import com.xebia.functional.xef.llm.openai.LLMModel
import com.xebia.functional.xef.vectorstores.LocalVectorStore
import kotlin.jvm.JvmStatic
import kotlin.time.ExperimentalTime
import com.xebia.functional.xef.embeddings.Embeddings
import com.xebia.functional.xef.llm.AIClient

data class AIRuntime<A>(val runtime: suspend (block: AI<A>) -> A) {
companion object {
@OptIn(ExperimentalTime::class)
@JvmStatic
fun <A> openAI(): AIRuntime<A> = AIRuntime { block ->
val openAIConfig = OpenAIConfig()
KtorOpenAIClient(openAIConfig).use { openAiClient ->
val embeddings = OpenAIEmbeddings(openAIConfig, openAiClient)
val vectorStore = LocalVectorStore(embeddings)
val scope =
CoreAIScope(
defaultModel = LLMModel.GPT_3_5_TURBO_16K,
defaultSerializationModel = LLMModel.GPT_3_5_TURBO_FUNCTIONS,
aiClient = openAiClient,
context = vectorStore,
embeddings = embeddings
)
block(scope)
}
}
}
data class AIRuntime<A>(
val client: AIClient,
val embeddings: Embeddings,
val runtime: suspend (block: AI<A>) -> A
) {
companion object
}
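
The OpenAI-specific openAI() runtime above is dropped from the companion; AIRuntime is now just data: a client, an embeddings implementation, and the function that runs an AI block. A rough sketch of how equivalent wiring could be rebuilt on top of the new shape — where sampleRuntime is a hypothetical helper and it is assumed, as in the replaced code, that CoreAIScope's remaining constructor parameters have defaults — might be:

package com.xebia.functional.xef.auto.example // hypothetical example file

import com.xebia.functional.xef.auto.AIRuntime
import com.xebia.functional.xef.auto.CoreAIScope
import com.xebia.functional.xef.embeddings.Embeddings
import com.xebia.functional.xef.llm.AIClient
import com.xebia.functional.xef.llm.LLM
import com.xebia.functional.xef.vectorstores.LocalVectorStore

// Hypothetical helper, not part of this commit: given an already-constructed
// client and embeddings, build a runtime that opens a CoreAIScope, runs the
// block against it, and closes the client when done (the removed openAI()
// runtime did this with `use`).
fun <A> sampleRuntime(
  client: AIClient,
  embeddings: Embeddings,
  chatModel: LLM.Chat,
  functionsModel: LLM.ChatWithFunctions
): AIRuntime<A> =
  AIRuntime(client, embeddings) { block ->
    try {
      val scope =
        CoreAIScope(
          defaultModel = chatModel,
          defaultSerializationModel = functionsModel,
          aiClient = client,
          context = LocalVectorStore(embeddings),
          embeddings = embeddings
        )
      block(scope)
    } finally {
      client.close()
    }
  }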
@@ -7,10 +7,17 @@ import com.xebia.functional.tokenizer.ModelType
import com.xebia.functional.tokenizer.truncateText
import com.xebia.functional.xef.AIError
import com.xebia.functional.xef.embeddings.Embeddings
import com.xebia.functional.xef.llm.openai.*
import com.xebia.functional.xef.llm.openai.CFunction
import com.xebia.functional.xef.llm.openai.images.ImagesGenerationRequest
import com.xebia.functional.xef.llm.openai.images.ImagesGenerationResponse
import com.xebia.functional.xef.llm.AIClient
import com.xebia.functional.xef.llm.LLM
import com.xebia.functional.xef.llm.LLMModel
import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequest
import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequestWithFunctions
import com.xebia.functional.xef.llm.models.chat.Message
import com.xebia.functional.xef.llm.models.chat.Role
import com.xebia.functional.xef.llm.models.functions.CFunction
import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest
import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse
import com.xebia.functional.xef.llm.models.text.CompletionRequest
import com.xebia.functional.xef.prompt.Prompt
import com.xebia.functional.xef.vectorstores.CombinedVectorStore
import com.xebia.functional.xef.vectorstores.LocalVectorStore
Expand All @@ -25,8 +32,8 @@ import kotlin.jvm.JvmName
* programs.
*/
class CoreAIScope(
val defaultModel: LLMModel,
val defaultSerializationModel: LLMModel,
val defaultModel: LLM.Chat,
val defaultSerializationModel: LLM.ChatWithFunctions,
val aiClient: AIClient,
val context: VectorStore,
val embeddings: Embeddings,
@@ -117,7 +124,7 @@ class CoreAIScope(
functions: List<CFunction>,
serializer: (json: String) -> A,
maxDeserializationAttempts: Int = this.maxDeserializationAttempts,
model: LLMModel = defaultSerializationModel,
model: LLM.ChatWithFunctions = defaultSerializationModel,
user: String = this.user,
echo: Boolean = this.echo,
numberOfPredictions: Int = this.numberOfPredictions,
@@ -162,7 +169,7 @@ class CoreAIScope(
@AiDsl
suspend fun promptMessage(
question: String,
model: LLMModel = defaultModel,
model: LLM.Chat = defaultModel,
functions: List<CFunction> = emptyList(),
user: String = this.user,
echo: Boolean = this.echo,
@@ -186,7 +193,7 @@ class CoreAIScope(
@AiDsl
suspend fun promptMessage(
prompt: Prompt,
model: LLMModel = defaultModel,
model: LLM.Chat = defaultModel,
functions: List<CFunction> = emptyList(),
user: String = this.user,
echo: Boolean = this.echo,
@@ -249,7 +256,7 @@
}

suspend fun buildChatRequest(): ChatCompletionRequest {
val messages: List<Message> = listOf(Message(Role.system.name, promptWithContext))
val messages: List<Message> = listOf(Message(Role.SYSTEM.name, promptWithContext))
return ChatCompletionRequest(
model = model.name,
user = user,
@@ -261,7 +268,7 @@ class CoreAIScope(
}

suspend fun chatWithFunctionsRequest(): ChatCompletionRequestWithFunctions {
val role: String = Role.user.name
val role: String = Role.USER.name
val firstFnName: String? = functions.firstOrNull()?.name
val messages: List<Message> = listOf(Message(role, promptWithContext))
return ChatCompletionRequestWithFunctions(
@@ -276,15 +283,15 @@
)
}

return when (model.kind) {
LLMModel.Kind.Completion ->
return when (model) {
is LLM.Completion ->
aiClient.createCompletion(buildCompletionRequest()).choices.map { it.text }
LLMModel.Kind.Chat ->
aiClient.createChatCompletion(buildChatRequest()).choices.map { it.message.content }
LLMModel.Kind.ChatWithFunctions ->
aiClient.createChatCompletionWithFunctions(chatWithFunctionsRequest()).choices.map {
it.message.functionCall.arguments
is LLM.ChatWithFunctions ->
aiClient.createChatCompletionWithFunctions(chatWithFunctionsRequest()).choices.mapNotNull {
it.message?.functionCall?.arguments
}
else ->
aiClient.createChatCompletion(buildChatRequest()).choices.mapNotNull { it.message?.content }
}
}

@@ -323,16 +330,16 @@ class CoreAIScope(
} else prompt
}

private fun tokensFromMessages(messages: List<Message>, model: LLMModel): Int {
private fun tokensFromMessages(messages: List<Message>, model: LLM): Int {
fun Encoding.countTokensFromMessages(tokensPerMessage: Int, tokensPerName: Int): Int =
messages.sumOf { message ->
countTokens(message.role) +
countTokens(message.content) +
(message.content?.let { countTokens(it) } ?: 0) +
tokensPerMessage +
(message.name?.let { tokensPerName } ?: 0)
} + 3

fun fallBackTo(fallbackModel: LLMModel, paddingTokens: Int): Int {
fun fallBackTo(fallbackModel: LLM, paddingTokens: Int): Int {
logger.debug {
"Warning: ${model.name} may change over time. " +
"Returning messages num tokens assuming ${fallbackModel.name} + $paddingTokens padding tokens."
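
The when dispatch above no longer switches on LLMModel.Kind; it pattern-matches on the model's type (is LLM.Completion, is LLM.ChatWithFunctions, with plain chat as the fallback), and the scope's defaults are now typed as LLM.Chat and LLM.ChatWithFunctions. The real declarations live in com.xebia.functional.xef.llm and are not shown in this diff; the following is only an illustrative sketch of the kind of hierarchy that makes such is-checks possible:

package com.xebia.functional.xef.llm.example // illustrative stand-in, not the real LLM hierarchy

// Each capability is a type rather than an enum constant, so call sites can
// narrow with `is` checks instead of reading a kind field.
sealed interface ExampleLLM {
  val name: String

  interface Completion : ExampleLLM
  interface Chat : ExampleLLM
  interface ChatWithFunctions : Chat
}

fun describe(model: ExampleLLM): String =
  when (model) {
    is ExampleLLM.Completion -> "${model.name}: plain text completion"
    is ExampleLLM.ChatWithFunctions -> "${model.name}: chat with function calling"
    else -> "${model.name}: chat"
  }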

This file was deleted.

@@ -1,6 +1,6 @@
package com.xebia.functional.xef.embeddings

import com.xebia.functional.xef.llm.openai.RequestConfig
import com.xebia.functional.xef.llm.models.embeddings.RequestConfig

data class Embedding(val data: List<Float>)

21 changes: 0 additions & 21 deletions core/src/commonMain/kotlin/com/xebia/functional/xef/env/config.kt

This file was deleted.

@@ -0,0 +1,28 @@
package com.xebia.functional.xef.llm

import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequest
import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequestWithFunctions
import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponse
import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponseWithFunctions
import com.xebia.functional.xef.llm.models.embeddings.EmbeddingRequest
import com.xebia.functional.xef.llm.models.embeddings.EmbeddingResult
import com.xebia.functional.xef.llm.models.images.ImagesGenerationRequest
import com.xebia.functional.xef.llm.models.images.ImagesGenerationResponse
import com.xebia.functional.xef.llm.models.text.CompletionRequest
import com.xebia.functional.xef.llm.models.text.CompletionResult

interface AIClient : AutoCloseable {
suspend fun createCompletion(request: CompletionRequest): CompletionResult

suspend fun createChatCompletion(request: ChatCompletionRequest): ChatCompletionResponse

suspend fun createChatCompletionWithFunctions(
request: ChatCompletionRequestWithFunctions
): ChatCompletionResponseWithFunctions

suspend fun createEmbeddings(request: EmbeddingRequest): EmbeddingResult

suspend fun createImages(request: ImagesGenerationRequest): ImagesGenerationResponse

override fun close() {}
}
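
AIClient is now a small provider-agnostic interface under com.xebia.functional.xef.llm, so alternative backends or cross-cutting wrappers only need to satisfy the members listed above. As a hedged illustration (not part of this commit), a logging decorator could rely on Kotlin interface delegation and intercept a single call:

package com.xebia.functional.xef.llm.example // hypothetical example file

import com.xebia.functional.xef.llm.AIClient
import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequest
import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponse

// Wraps any AIClient: `AIClient by delegate` forwards the remaining members
// (completions, functions, embeddings, images, close) to the wrapped client.
class LoggingAIClient(private val delegate: AIClient) : AIClient by delegate {
  override suspend fun createChatCompletion(request: ChatCompletionRequest): ChatCompletionResponse {
    println("createChatCompletion: $request")
    return delegate.createChatCompletion(request)
  }
}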