diff --git a/charts/console-rapid/Chart.yaml b/charts/console-rapid/Chart.yaml
index 3b10844c13..784b3f55ca 100644
--- a/charts/console-rapid/Chart.yaml
+++ b/charts/console-rapid/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: console-rapid
 description: rapid channel chart for the plural console (used for testing)
 appVersion: 0.10.42
-version: 0.3.74
+version: 0.3.75
 dependencies:
 - name: kas
   version: 0.1.0
diff --git a/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml b/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml
index 7bca02dab2..8d056ac915 100644
--- a/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml
+++ b/charts/controller/crds/deployments.plural.sh_deploymentsettings.yaml
@@ -272,7 +272,6 @@ spec:
                 - VERTEX
                 type: string
               toolProvider:
-                default: OPENAI
                 description: Provider to use for tool calling, in case you want
                   to use a different LLM more optimized to those tasks
                 enum:
diff --git a/go/controller/api/v1alpha1/deploymentsettings_types.go b/go/controller/api/v1alpha1/deploymentsettings_types.go
index 86e14478e2..42c70c72b8 100644
--- a/go/controller/api/v1alpha1/deploymentsettings_types.go
+++ b/go/controller/api/v1alpha1/deploymentsettings_types.go
@@ -186,7 +186,6 @@ type AISettings struct {
 	// Provider to use for tool calling, in case you want to use a different LLM more optimized to those tasks
 	//
 	// +kubebuilder:validation:Enum=OPENAI;ANTHROPIC;OLLAMA;AZURE;BEDROCK;VERTEX
-	// +kubebuilder:default=OPENAI
 	// +kubebuilder:validation:Optional
 	ToolProvider *console.AiProvider `json:"toolProvider,omitempty"`
 
diff --git a/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml b/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml
index 7bca02dab2..8d056ac915 100644
--- a/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml
+++ b/go/controller/config/crd/bases/deployments.plural.sh_deploymentsettings.yaml
@@ -272,7 +272,6 @@ spec:
                 - VERTEX
                 type: string
               toolProvider:
-                default: OPENAI
                 description: Provider to use for tool calling, in case you want
                   to use a different LLM more optimized to those tasks
                 enum:
diff --git a/lib/console/ai/provider.ex b/lib/console/ai/provider.ex
index 77b9319221..b9f70e5676 100644
--- a/lib/console/ai/provider.ex
+++ b/lib/console/ai/provider.ex
@@ -48,8 +48,7 @@ defmodule Console.AI.Provider do
       do: handle_tool_calls(result, tools)
   end
 
-  def summary(text),
-    do: completion([{:user, text}], preface: @summary)
+  def summary(text), do: completion([{:user, text}], preface: @summary)
 
   defp tool_client(%DeploymentSettings{ai: %AI{tool_provider: p}} = settings) when not is_nil(p),
     do: client(put_in(settings.ai.provider, p))
diff --git a/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml b/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml
index 7bca02dab2..8d056ac915 100644
--- a/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml
+++ b/plural/helm/console/crds/deployments.plural.sh_deploymentsettings.yaml
@@ -272,7 +272,6 @@ spec:
                 - VERTEX
                 type: string
               toolProvider:
-                default: OPENAI
                 description: Provider to use for tool calling, in case you want
                   to use a different LLM more optimized to those tasks
                 enum: