diff --git a/.devcontainer/demo/README.md b/.devcontainer/demo/README.md index 991fe793f8b..1221fbeffee 100644 --- a/.devcontainer/demo/README.md +++ b/.devcontainer/demo/README.md @@ -5,7 +5,7 @@ Run this when the script completes ```bash # If you are at the root of the repo cd .devcontainer/demo -./armadactl create queue test --priorityFactor 1 +./armadactl create queue test --priority-factor 1 ./armadactl submit jobs.yaml ./armadactl watch test job-set-1 ``` @@ -21,4 +21,4 @@ Forward these ports: - 10000: Lookoutv2 API - 8080: Armada Server API -and go to: http://localhost:8089 \ No newline at end of file +and go to: http://localhost:8089 diff --git a/.github/actions/setup-go-cache/action.yml b/.github/actions/setup-go-cache/action.yml index 5754efd7063..53d21cde1a1 100644 --- a/.github/actions/setup-go-cache/action.yml +++ b/.github/actions/setup-go-cache/action.yml @@ -5,9 +5,9 @@ inputs: description: "Prefix for the cache key" required: true go-version: - description: "Version of Go. Default 1.20" + description: "Version of Go. Default 1.21" required: false - default: "1.20" + default: "1.21" cache-tools: description: "True/false flag to cache tools" required: false diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4ca0ff98457..5d5b408dec7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: - uses: goreleaser/goreleaser-action@v5 with: distribution: goreleaser - version: v1.20.0 + version: v1.24.0 args: release --snapshot --skip-sbom --skip-sign --clean env: DOCKER_REPO: "gresearch" @@ -88,5 +88,4 @@ jobs: with: name: nupkg-artifacts path: | - ./bin/client/DotNet/G-Research.Armada.Client.${{ steps.create-release-tag.outputs.release_tag }}.nupkg ./bin/client/DotNet/ArmadaProject.Io.Client.${{ steps.create-release-tag.outputs.release_tag }}.nupkg diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 87fae9f257a..cf53c857ed3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -83,7 +83,7 @@ jobs: uses: goreleaser/goreleaser-action@v5 with: distribution: goreleaser - version: v1.19.2 + version: v1.24.0 args: "-f ./.goreleaser.yml release --clean" env: FULL_RELEASE: true @@ -119,4 +119,4 @@ jobs: TAG: ${{ github.event.workflow_run.head_branch }} run: | VERSION=${TAG#v} - dotnet nuget push ./bin/client/DotNet/G-Research.Armada.Client.$VERSION.nupkg ./bin/client/DotNet/ArmadaProject.Io.Client.$VERSION.nupkg -k ${{ secrets.NUGET_API_KEY }} -s https://api.nuget.org/v3/index.json + dotnet nuget push ./bin/client/DotNet/ArmadaProject.Io.Client.$VERSION.nupkg -k ${{ secrets.NUGET_API_KEY }} -s https://api.nuget.org/v3/index.json diff --git a/.gitignore b/.gitignore index d3f2315c6b8..806150767e9 100644 --- a/.gitignore +++ b/.gitignore @@ -70,7 +70,6 @@ msbuild.log msbuild.err msbuild.wrn packages -client/DotNet.gRPC/Armada.Client.Grpc/generated # python # Pycache, dist, poetry versions @@ -94,6 +93,7 @@ build/ssl/certs/ca-certificates.crt developer/volumes/go/pkg/ .coverage coverage.xml +.venv # Yarn developer/yarn.lock diff --git a/.goreleaser.yml b/.goreleaser.yml index 3b32929e0ee..229b42622e2 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -19,7 +19,7 @@ env: # To use a builder other than "default", set this variable. # Necessary for, e.g., GitHub actions cache integration. 
- DOCKER_BUILDX_BUILDER={{ if index .Env "DOCKER_BUILDX_BUILDER" }}{{ .Env.DOCKER_BUILDX_BUILDER }}{{ else }}default{{ end }} - - GOVERSION={{ if index .Env "GOVERSION" }}{{ .Env.GOVERSION }}{{ else }}go1.20{{ end }} + - GOVERSION={{ if index .Env "GOVERSION" }}{{ .Env.GOVERSION }}{{ else }}go1.21{{ end }} builds: - env: [CGO_ENABLED=0] @@ -49,15 +49,6 @@ builds: - linux goarch: - amd64 - - env: [CGO_ENABLED=0] - id: pulsartest - binary: pulsartest - main: ./cmd/pulsartest/main.go - mod_timestamp: '{{ .CommitTimestamp }}' - goos: - - linux - goarch: - - amd64 - env: [CGO_ENABLED=0] id: testsuite binary: testsuite diff --git a/.run/Armada (Pulsar Scheduler).run.xml b/.run/Armada.run.xml similarity index 73% rename from .run/Armada (Pulsar Scheduler).run.xml rename to .run/Armada.run.xml index 74f89fac343..40beed40d1a 100644 --- a/.run/Armada (Pulsar Scheduler).run.xml +++ b/.run/Armada.run.xml @@ -1,11 +1,11 @@ - + - - + + @@ -15,10 +15,10 @@ - - + + - \ No newline at end of file + diff --git a/.run/Pulsar Executor.run.xml b/.run/Executor.run.xml similarity index 86% rename from .run/Pulsar Executor.run.xml rename to .run/Executor.run.xml index 8db3ac1d0f5..1bac201ed06 100644 --- a/.run/Pulsar Executor.run.xml +++ b/.run/Executor.run.xml @@ -1,5 +1,5 @@ - + diff --git a/.run/Legacy Server.run.xml b/.run/Legacy Server.run.xml deleted file mode 100644 index 9f9ab23577c..00000000000 --- a/.run/Legacy Server.run.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/.run/Lookout Ingester V2.run.xml b/.run/Lookout Ingester V2.run.xml index b3fb083fc3a..58e7bdbdfdd 100644 --- a/.run/Lookout Ingester V2.run.xml +++ b/.run/Lookout Ingester V2.run.xml @@ -4,7 +4,6 @@ - @@ -15,4 +14,4 @@ - \ No newline at end of file + diff --git a/.run/LookoutV2.run.xml b/.run/LookoutV2.run.xml index 5a027f8431a..c01ec8f5b09 100644 --- a/.run/LookoutV2.run.xml +++ b/.run/LookoutV2.run.xml @@ -4,7 +4,6 @@ - @@ -14,4 +13,4 @@ - \ No newline at end of file + diff --git a/.run/Run Migrations.run.xml b/.run/Run Migrations.run.xml new file mode 100644 index 00000000000..b8f0bbf4a45 --- /dev/null +++ b/.run/Run Migrations.run.xml @@ -0,0 +1,17 @@ + + + + \ No newline at end of file diff --git a/.run/Scheduler.run.xml b/.run/Scheduler.run.xml index cb03b1195c0..d9beb38c734 100644 --- a/.run/Scheduler.run.xml +++ b/.run/Scheduler.run.xml @@ -4,11 +4,11 @@ + - @@ -16,4 +16,4 @@ - \ No newline at end of file + diff --git a/.run/Pulsar Server.run.xml b/.run/Server.run.xml similarity index 83% rename from .run/Pulsar Server.run.xml rename to .run/Server.run.xml index e6b56d783ba..43a19f219b0 100644 --- a/.run/Pulsar Server.run.xml +++ b/.run/Server.run.xml @@ -1,15 +1,15 @@ - + + - diff --git a/.run/Start Dependencies.run.xml b/.run/Start Dependencies.run.xml new file mode 100644 index 00000000000..05d264d9ddf --- /dev/null +++ b/.run/Start Dependencies.run.xml @@ -0,0 +1,17 @@ + + + + \ No newline at end of file diff --git a/.run/lookoutv2PostgresMigration.run.xml b/.run/lookoutv2PostgresMigration.run.xml index 7379481ca7b..be782ff2ecc 100644 --- a/.run/lookoutv2PostgresMigration.run.xml +++ b/.run/lookoutv2PostgresMigration.run.xml @@ -5,7 +5,6 @@ - diff --git a/README.md b/README.md index 2ccda4be29e..b2ab387df87 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,7 @@ For documentation, see the following: - [User guide](./docs/user.md) - [Quickstart](./docs/quickstart/index.md) - [Development guide](./docs/developer.md) +- [Release notes/Version 
history](https://github.com/armadaproject/armada/releases) - [API Documentation](./docs/developer/api.md) We expect readers of the documentation to have a basic understanding of Docker and Kubernetes; see, e.g., the following links: diff --git a/build/airflow-operator/Dockerfile b/build/airflow-operator/Dockerfile index ff086bf72be..a3d774b30d6 100644 --- a/build/airflow-operator/Dockerfile +++ b/build/airflow-operator/Dockerfile @@ -1,6 +1,6 @@ -ARG PYTHON_VERSION=3.8.15 - -FROM --platform=x86_64 python:${PYTHON_VERSION}-buster +ARG PLATFORM=x86_64 +ARG BASE_IMAGE=python:3.8.18-bookworm +FROM --platform=$PLATFORM ${BASE_IMAGE} RUN mkdir /proto diff --git a/build/python-client/Dockerfile b/build/python-client/Dockerfile index f59e3c4566b..10aa957944b 100644 --- a/build/python-client/Dockerfile +++ b/build/python-client/Dockerfile @@ -1,6 +1,7 @@ -ARG PYTHON_VERSION=3.8.18 +ARG PLATFORM=x86_64 +ARG BASE_IMAGE=python:3.8.18-bookworm -FROM --platform=x86_64 python:${PYTHON_VERSION}-bookworm +FROM --platform=$PLATFORM ${BASE_IMAGE} RUN mkdir /proto diff --git a/client/DotNet/Armada.Client.Test/Armada.Client.Test.csproj b/client/DotNet/Armada.Client.Test/Armada.Client.Test.csproj deleted file mode 100644 index 0c59ea66656..00000000000 --- a/client/DotNet/Armada.Client.Test/Armada.Client.Test.csproj +++ /dev/null @@ -1,22 +0,0 @@ - - - - netcoreapp3.0 - - false - - GResearch.Armada.Client.Test - - - - - - - - - - - - - - diff --git a/client/DotNet/Armada.Client.Test/Tests.cs b/client/DotNet/Armada.Client.Test/Tests.cs deleted file mode 100644 index 032246c5f2f..00000000000 --- a/client/DotNet/Armada.Client.Test/Tests.cs +++ /dev/null @@ -1,128 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Net.Http; -using System.Linq; -using System.Net.Http.Headers; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using NUnit.Framework; -using GResearch.Armada.Client; -using RichardSzalay.MockHttp; - -namespace GResearch.Armada.Client.Test -{ - public class Tests - { - [Test] - [Explicit("Intended for manual testing against armada server with proxy.")] - public async Task TestWatchingEvents() - { - var client = new ArmadaClient("http://localhost:8080", new HttpClient()); - - var queue = "test"; - var jobSet = $"set-{Guid.NewGuid()}"; - - // produce some events - await client.CreateQueueAsync(new ApiQueue {Name = queue, PriorityFactor = 200}); - var request = CreateJobRequest(jobSet); - var response = await client.SubmitJobsAsync(request); - var cancelResponse = - await client.CancelJobsAsync(new ApiJobCancelRequest {Queue = "test", JobSetId = jobSet}); - - using (var cts = new CancellationTokenSource()) - { - var eventCount = 0; - Task.Run(() => client.WatchEvents(queue, jobSet, null, cts.Token, m => eventCount++, e => throw e)); - await Task.Delay(TimeSpan.FromMinutes(2)); - cts.Cancel(); - Assert.That(eventCount, Is.EqualTo(4)); - } - } - - [Test] - public async Task TestSimpleJobSubmitFlow() - { - var queue = "test"; - var jobSet = $"set-{Guid.NewGuid()}"; - - IArmadaClient client = new ArmadaClient("http://localhost:8080", new HttpClient()); - await client.CreateQueueAsync(new ApiQueue {Name = queue, PriorityFactor = 200}); - - var request = CreateJobRequest(jobSet); - - var response = await client.SubmitJobsAsync(request); - var cancelResponse = - await client.CancelJobsAsync(new ApiJobCancelRequest {Queue = "test", JobSetId = jobSet}); - var events = await client.GetJobEventsStream(queue, jobSet, watch: false); - var allEvents = events.ToList(); - - 
Assert.That(allEvents, Is.Not.Empty); - Assert.That(allEvents[0].Result.Message.Submitted, Is.Not.Null); - } - - [Test] - public async Task TestProcessingUnknownEvents() - { - var mockHttp = new MockHttpMessageHandler(); - mockHttp.When("http://localhost:8080/*") - .Respond("application/json", - @"{""result"":{""Id"":""1593611590122-0"",""message"":{""Queued"":{""JobId"":""01ec5ae6f9wvya6cr6stzwty7v"",""JobSetId"":""set-bae48cc8-9f70-465f-ae5c-c92713b5f24f"",""Queue"":""test"",""Created"":""2020-07-01T13:53:10.122263955Z""}}}} - {""result"":{""Id"":""1593611590122-0"",""message"":{""UnknownEvent"":""test""}}} - {""error"": ""test error""} - {} - - {""a"":""b""}"); - - IArmadaClient client = new ArmadaClient("http://localhost:8080", new HttpClient(mockHttp)); - var events = (await client.GetJobEventsStream("queue", "jobSet", watch: false)).ToList(); - Assert.That(events.Count(), Is.EqualTo(2)); - Assert.That(events[0].Result.Message.Event, Is.Not.Null); - Assert.That(events[1].Error, Is.EqualTo("test error")); - } - - private static ApiJobSubmitRequest CreateJobRequest(string jobSet) - { - var pod = new V1PodSpec - { - Containers = new[] - { - new V1Container - { - Name = "Container1", - Image = "index.docker.io/library/ubuntu:latest", - Args = new[] {"sleep", "10s"}, - SecurityContext = new V1SecurityContext {RunAsUser = 1000}, - Resources = new V1ResourceRequirements - { - Requests = new V1ResourceList - { - ["cpu"] = "120m", - ["memory"] = "512Mi" - }, - Limits = new V1ResourceList - { - ["cpu"] = "120m", - ["memory"] = "512Mi" - } - } - } - } - }; - - return new ApiJobSubmitRequest - { - Queue = "test", - JobSetId = jobSet, - JobRequestItems = new[] - { - new ApiJobSubmitRequestItem - { - Priority = 1, - PodSpec = pod - } - }, - }; - } - } -} diff --git a/client/DotNet/Armada.Client.sln b/client/DotNet/Armada.Client.sln index b08d1f87c78..22ab8ac90c6 100644 --- a/client/DotNet/Armada.Client.sln +++ b/client/DotNet/Armada.Client.sln @@ -1,9 +1,5 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Armada.Client", "Armada.Client\Armada.Client.csproj", "{8016908D-697F-48B0-9AAA-048A64A06FA8}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Armada.Client.Test", "Armada.Client.Test\Armada.Client.Test.csproj", "{FFF216CF-27D6-4E56-B472-30092FF6AE4D}" -EndProject Project("{C67ED064-D6AE-4CC6-8EED-B831AEC436C3}") = "ArmadaProject.Io.Client", "ArmadaProject.Io.Client\ArmadaProject.Io.Client.csproj", "{C67ED064-D6AE-4CC6-8EED-B831AEC436C3}" EndProject Project("{042CF01C-060C-4B27-A5B7-E7500636451A}") = "ArmadaProject.Io.Client.Test", "ArmadaProject.Io.Client.Test\ArmadaProject.Io.Client.Test.csproj", "{042CF01C-060C-4B27-A5B7-E7500636451A}" @@ -14,14 +10,6 @@ Global Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {8016908D-697F-48B0-9AAA-048A64A06FA8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {8016908D-697F-48B0-9AAA-048A64A06FA8}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8016908D-697F-48B0-9AAA-048A64A06FA8}.Release|Any CPU.ActiveCfg = Release|Any CPU - {8016908D-697F-48B0-9AAA-048A64A06FA8}.Release|Any CPU.Build.0 = Release|Any CPU - {FFF216CF-27D6-4E56-B472-30092FF6AE4D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {FFF216CF-27D6-4E56-B472-30092FF6AE4D}.Debug|Any CPU.Build.0 = Debug|Any CPU - {FFF216CF-27D6-4E56-B472-30092FF6AE4D}.Release|Any CPU.ActiveCfg = Release|Any CPU - {FFF216CF-27D6-4E56-B472-30092FF6AE4D}.Release|Any CPU.Build.0 = Release|Any 
CPU {C67ED064-D6AE-4CC6-8EED-B831AEC436C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C67ED064-D6AE-4CC6-8EED-B831AEC436C3}.Debug|Any CPU.Build.0 = Debug|Any CPU {C67ED064-D6AE-4CC6-8EED-B831AEC436C3}.Release|Any CPU.ActiveCfg = Release|Any CPU diff --git a/client/DotNet/Armada.Client/Armada.Client.csproj b/client/DotNet/Armada.Client/Armada.Client.csproj deleted file mode 100644 index a6423b24cda..00000000000 --- a/client/DotNet/Armada.Client/Armada.Client.csproj +++ /dev/null @@ -1,27 +0,0 @@ - - - netstandard2.0 - G-Research.Armada.Client - 0.0.1 - G-Research - G-Research - Armada.Client is a .NET core library for communicating with Armada server implemented in C# - Copyright G-Research 2019 - https://github.com/armadaproject/armada - armada gresearch g-research .net c# dotnet - Apache-2.0 - GResearch.Armada.Client - - - - all - runtime; build; native; contentfiles; analyzers - - - - - - - - - diff --git a/client/DotNet/Armada.Client/Client.cs b/client/DotNet/Armada.Client/Client.cs deleted file mode 100644 index d0690fd5fdd..00000000000 --- a/client/DotNet/Armada.Client/Client.cs +++ /dev/null @@ -1,183 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace GResearch.Armada.Client -{ - public interface IEvent - { - string JobId { get; } - string JobSetId { get; } - string Queue { get; } - System.DateTimeOffset? Created { get; } - } - - public interface IArmadaClient - { - Task CancelJobsAsync(ApiJobCancelRequest body); - Task SubmitJobsAsync(ApiJobSubmitRequest body); - Task CreateQueueAsync(ApiQueue body); - Task UpdateQueueAsync(string name, ApiQueue body); - Task DeleteQueueAsync(string name); - Task GetQueueAsync(string name); - Task>> GetJobEventsStream(string queue, string jobSetId, string fromMessage = null, bool watch = false); - Task WatchEvents( - string queue, - string jobSetId, - string fromMessageId, - CancellationToken ct, - Action> onMessage, - Action onException = null); - } - - public partial class ApiEventMessage - { - public IEvent Event => Cancelled ?? Submitted ?? Queued ?? DuplicateFound ?? Leased ?? LeaseReturned ?? - LeaseExpired ?? Pending ?? Running ?? UnableToSchedule ?? - Failed ?? Succeeded ?? Reprioritized ?? Cancelling ?? Cancelled ?? Terminated ?? - Utilisation ?? IngressInfo ?? Reprioritizing ?? Updated ?? 
FailedCompressed as IEvent; - } - - public partial class ApiJobSubmittedEvent : IEvent {} - public partial class ApiJobQueuedEvent : IEvent {} - public partial class ApiJobDuplicateFoundEvent : IEvent {} - public partial class ApiJobLeasedEvent : IEvent {} - public partial class ApiJobLeaseReturnedEvent : IEvent {} - public partial class ApiJobLeaseExpiredEvent : IEvent {} - public partial class ApiJobPendingEvent : IEvent {} - public partial class ApiJobRunningEvent : IEvent {} - public partial class ApiJobUnableToScheduleEvent : IEvent {} - public partial class ApiJobFailedEvent : IEvent {} - public partial class ApiJobSucceededEvent : IEvent {} - public partial class ApiJobReprioritizedEvent : IEvent {} - public partial class ApiJobCancellingEvent : IEvent {} - public partial class ApiJobCancelledEvent : IEvent {} - public partial class ApiJobTerminatedEvent : IEvent {} - public partial class ApiJobUtilisationEvent : IEvent {} - public partial class ApiJobIngressInfoEvent : IEvent {} - public partial class ApiJobReprioritizingEvent : IEvent {} - public partial class ApiJobUpdatedEvent : IEvent {} - - public partial class ApiJobSubmitRequestItem - { - public ApiJobSubmitRequestItem() - { - ClientId = Guid.NewGuid().ToString("N"); - } - } - - public class StreamResponse - { - public T Result { get; set; } - public string Error { get; set; } - } - - public partial class ArmadaClient : IArmadaClient - { - public async Task>> GetJobEventsStream( - string queue, string jobSetId, string fromMessageId = null, bool watch = false) - { - var fileResponse = await GetJobSetEventsCoreAsync(queue, jobSetId, - new ApiJobSetRequest {FromMessageId = fromMessageId, Watch = watch}); - return ReadEventStream(fileResponse.Stream); - } - - private IEnumerable> ReadEventStream(Stream stream) - { - using (var reader = new StreamReader(stream)) - { - while (!reader.EndOfStream) - { - var line = reader.ReadLine(); - - var (_, eventMessage) = ProcessEventLine(null, line); - if (eventMessage != null) - { - yield return eventMessage; - } - } - } - } - - public async Task WatchEvents( - string queue, - string jobSetId, - string fromMessageId, - CancellationToken ct, - Action> onMessage, - Action onException = null) - { - var failCount = 0; - while (!ct.IsCancellationRequested) - { - try - { - using (var fileResponse = await GetJobSetEventsCoreAsync(queue, jobSetId, - new ApiJobSetRequest {FromMessageId = fromMessageId, Watch = true}, ct)) - using (var reader = new StreamReader(fileResponse.Stream)) - { - try - { - failCount = 0; - while (!ct.IsCancellationRequested && !reader.EndOfStream) - { - var line = await reader.ReadLineAsync(); - var (newMessageId, eventMessage) = ProcessEventLine(fromMessageId, line); - fromMessageId = newMessageId; - if (eventMessage != null) - { - onMessage(eventMessage); - } - } - } - catch (IOException) - { - // Stream was probably closed by the server, continue to reconnect - } - } - } - catch (TaskCanceledException) - { - // Server closed the connection, continue to reconnect - } - catch (Exception e) - { - failCount++; - onException?.Invoke(e); - // gradually back off - await Task.Delay(TimeSpan.FromSeconds(Math.Min(300, Math.Pow(2 ,failCount))), ct); - } - } - } - - private (string, StreamResponse) ProcessEventLine(string fromMessageId, string line) - { - try - { - var eventMessage = - JsonConvert.DeserializeObject>(line, - this.JsonSerializerSettings); - - fromMessageId = eventMessage?.Result?.Id ?? 
fromMessageId; - - // Ignore unknown event types - if (String.IsNullOrEmpty(eventMessage?.Error) && - eventMessage?.Result?.Message?.Event == null) - { - eventMessage = null; - } - return (fromMessageId, eventMessage); - } - catch(Exception) - { - // Ignore messages which can't be deserialized - } - - return (fromMessageId, null); - } - } -} - diff --git a/client/DotNet/Armada.Client/ClientGenerated.cs b/client/DotNet/Armada.Client/ClientGenerated.cs deleted file mode 100644 index b323d013373..00000000000 --- a/client/DotNet/Armada.Client/ClientGenerated.cs +++ /dev/null @@ -1,6717 +0,0 @@ -//---------------------- -// -// Generated using the NSwag toolchain v13.1.3.0 (NJsonSchema v10.0.27.0 (Newtonsoft.Json v12.0.0.0)) (http://NSwag.org) -// -//---------------------- - -#pragma warning disable 108 // Disable "CS0108 '{derivedDto}.ToJson()' hides inherited member '{dtoBase}.ToJson()'. Use the new keyword if hiding was intended." -#pragma warning disable 114 // Disable "CS0114 '{derivedDto}.RaisePropertyChanged(String)' hides inherited member 'dtoBase.RaisePropertyChanged(String)'. To make the current member override that implementation, add the override keyword. Otherwise add the new keyword." -#pragma warning disable 472 // Disable "CS0472 The result of the expression is always 'false' since a value of type 'Int32' is never equal to 'null' of type 'Int32?' -#pragma warning disable 1573 // Disable "CS1573 Parameter '...' has no matching param tag in the XML comment for ... -#pragma warning disable 1591 // Disable "CS1591 Missing XML comment for publicly visible type or member ..." - -namespace GResearch.Armada.Client -{ - using System = global::System; - - [System.CodeDom.Compiler.GeneratedCode("NSwag", "13.1.3.0 (NJsonSchema v10.0.27.0 (Newtonsoft.Json v12.0.0.0))")] - public partial class ArmadaClient - { - private string _baseUrl = ""; - private System.Net.Http.HttpClient _httpClient; - private System.Lazy _settings; - - public ArmadaClient(string baseUrl, System.Net.Http.HttpClient httpClient) - { - BaseUrl = baseUrl; - _httpClient = httpClient; - _settings = new System.Lazy(() => - { - var settings = new Newtonsoft.Json.JsonSerializerSettings(); - UpdateJsonSerializerSettings(settings); - return settings; - }); - } - - public string BaseUrl - { - get { return _baseUrl; } - set { _baseUrl = value; } - } - - protected Newtonsoft.Json.JsonSerializerSettings JsonSerializerSettings { get { return _settings.Value; } } - - partial void UpdateJsonSerializerSettings(Newtonsoft.Json.JsonSerializerSettings settings); - partial void PrepareRequest(System.Net.Http.HttpClient client, System.Net.Http.HttpRequestMessage request, string url); - partial void PrepareRequest(System.Net.Http.HttpClient client, System.Net.Http.HttpRequestMessage request, System.Text.StringBuilder urlBuilder); - partial void ProcessResponse(System.Net.Http.HttpClient client, System.Net.Http.HttpResponseMessage response); - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task CreateQueuesAsync(ApiQueueList body) - { - return CreateQueuesAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. 
- public async System.Threading.Tasks.Task CreateQueuesAsync(ApiQueueList body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? BaseUrl.TrimEnd('/') : "").Append("/v1/batched/create_queues"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response.(streaming responses) - /// A server side error occurred. - public System.Threading.Tasks.Task GetQueuesAsync(long? num) - { - return GetQueuesAsync(num, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response.(streaming responses) - /// A server side error occurred. - public async System.Threading.Tasks.Task GetQueuesAsync(long? num, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/batched/queues?"); - if (num != null) - { - urlBuilder_.Append(System.Uri.EscapeDataString("num") + "=").Append(System.Uri.EscapeDataString(ConvertToString(num, System.Globalization.CultureInfo.InvariantCulture))).Append("&"); - } - urlBuilder_.Length--; - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - request_.Method = new System.Net.Http.HttpMethod("GET"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task UpdateQueuesAsync(ApiQueueList body) - { - return UpdateQueuesAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task UpdateQueuesAsync(ApiQueueList body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/batched/update_queues"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("PUT"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response.(streaming responses) - /// A server side error occurred. - protected System.Threading.Tasks.Task GetJobSetEventsCoreAsync(string queue, string id, ApiJobSetRequest body) - { - return GetJobSetEventsCoreAsync(queue, id, body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response.(streaming responses) - /// A server side error occurred. - protected async System.Threading.Tasks.Task GetJobSetEventsCoreAsync(string queue, string id, ApiJobSetRequest body, System.Threading.CancellationToken cancellationToken) - { - if (queue == null) - throw new System.ArgumentNullException("queue"); - - if (id == null) - throw new System.ArgumentNullException("id"); - - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job-set/{queue}/{id}"); - urlBuilder_.Replace("{queue}", System.Uri.EscapeDataString(ConvertToString(queue, System.Globalization.CultureInfo.InvariantCulture))); - urlBuilder_.Replace("{id}", System.Uri.EscapeDataString(ConvertToString(id, System.Globalization.CultureInfo.InvariantCulture))); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/ndjson-stream")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200" || status_ == "206") - { - var responseStream_ = response_.Content == null ? System.IO.Stream.Null : await response_.Content.ReadAsStreamAsync().ConfigureAwait(false); - var fileResponse_ = new FileResponse((int)response_.StatusCode, headers_, responseStream_, null, response_); - client_ = null; response_ = null; // response and client are disposed by FileResponse - return fileResponse_; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task CancelJobsAsync(ApiJobCancelRequest body) - { - return CancelJobsAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task CancelJobsAsync(ApiJobCancelRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job/cancel"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task GetJobDetailsAsync(ApiJobDetailsRequest body) - { - return GetJobDetailsAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task GetJobDetailsAsync(ApiJobDetailsRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job/details"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task PreemptJobsAsync(ApiJobPreemptRequest body) - { - return PreemptJobsAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task PreemptJobsAsync(ApiJobPreemptRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job/preempt"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task ReprioritizeJobsAsync(ApiJobReprioritizeRequest body) - { - return ReprioritizeJobsAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task ReprioritizeJobsAsync(ApiJobReprioritizeRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job/reprioritize"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task GetJobStatusAsync(ApiJobStatusRequest body) - { - return GetJobStatusAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task GetJobStatusAsync(ApiJobStatusRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job/status"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task SubmitJobsAsync(ApiJobSubmitRequest body) - { - return SubmitJobsAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task SubmitJobsAsync(ApiJobSubmitRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/job/submit"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task CancelJobSetAsync(ApiJobSetCancelRequest body) - { - return CancelJobSetAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task CancelJobSetAsync(ApiJobSetCancelRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/jobset/cancel"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task CreateQueueAsync(ApiQueue body) - { - return CreateQueueAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task CreateQueueAsync(ApiQueue body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/queue"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task GetQueueAsync(string name) - { - return GetQueueAsync(name, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task GetQueueAsync(string name, System.Threading.CancellationToken cancellationToken) - { - if (name == null) - throw new System.ArgumentNullException("name"); - - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/queue/{name}"); - urlBuilder_.Replace("{name}", System.Uri.EscapeDataString(ConvertToString(name, System.Globalization.CultureInfo.InvariantCulture))); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - request_.Method = new System.Net.Http.HttpMethod("GET"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task UpdateQueueAsync(string name, ApiQueue body) - { - return UpdateQueueAsync(name, body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task UpdateQueueAsync(string name, ApiQueue body, System.Threading.CancellationToken cancellationToken) - { - if (name == null) - throw new System.ArgumentNullException("name"); - - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/queue/{name}"); - urlBuilder_.Replace("{name}", System.Uri.EscapeDataString(ConvertToString(name, System.Globalization.CultureInfo.InvariantCulture))); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("PUT"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task DeleteQueueAsync(string name) - { - return DeleteQueueAsync(name, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task DeleteQueueAsync(string name, System.Threading.CancellationToken cancellationToken) - { - if (name == null) - throw new System.ArgumentNullException("name"); - - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/queue/{name}"); - urlBuilder_.Replace("{name}", System.Uri.EscapeDataString(ConvertToString(name, System.Globalization.CultureInfo.InvariantCulture))); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - request_.Method = new System.Net.Http.HttpMethod("DELETE"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - /// A successful response. - /// A server side error occurred. - public System.Threading.Tasks.Task GetJobRunDetailsAsync(ApiJobRunDetailsRequest body) - { - return GetJobRunDetailsAsync(body, System.Threading.CancellationToken.None); - } - - /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. - /// A successful response. - /// A server side error occurred. - public async System.Threading.Tasks.Task GetJobRunDetailsAsync(ApiJobRunDetailsRequest body, System.Threading.CancellationToken cancellationToken) - { - var urlBuilder_ = new System.Text.StringBuilder(); - urlBuilder_.Append(BaseUrl != null ? 
BaseUrl.TrimEnd('/') : "").Append("/v1/run/details"); - - var client_ = _httpClient; - try - { - using (var request_ = new System.Net.Http.HttpRequestMessage()) - { - var content_ = new System.Net.Http.StringContent(Newtonsoft.Json.JsonConvert.SerializeObject(body, _settings.Value)); - content_.Headers.ContentType = System.Net.Http.Headers.MediaTypeHeaderValue.Parse("application/json"); - request_.Content = content_; - request_.Method = new System.Net.Http.HttpMethod("POST"); - request_.Headers.Accept.Add(System.Net.Http.Headers.MediaTypeWithQualityHeaderValue.Parse("application/json")); - - PrepareRequest(client_, request_, urlBuilder_); - var url_ = urlBuilder_.ToString(); - request_.RequestUri = new System.Uri(url_, System.UriKind.RelativeOrAbsolute); - PrepareRequest(client_, request_, url_); - - var response_ = await client_.SendAsync(request_, System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken).ConfigureAwait(false); - try - { - var headers_ = System.Linq.Enumerable.ToDictionary(response_.Headers, h_ => h_.Key, h_ => h_.Value); - if (response_.Content != null && response_.Content.Headers != null) - { - foreach (var item_ in response_.Content.Headers) - headers_[item_.Key] = item_.Value; - } - - ProcessResponse(client_, response_); - - var status_ = ((int)response_.StatusCode).ToString(); - if (status_ == "200") - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - return objectResponse_.Object; - } - else - { - var objectResponse_ = await ReadObjectResponseAsync(response_, headers_).ConfigureAwait(false); - throw new ApiException("An unexpected error response.", (int)response_.StatusCode, objectResponse_.Text, headers_, objectResponse_.Object, null); - } - } - finally - { - if (response_ != null) - response_.Dispose(); - } - } - } - finally - { - } - } - - protected struct ObjectResponseResult - { - public ObjectResponseResult(T responseObject, string responseText) - { - this.Object = responseObject; - this.Text = responseText; - } - - public T Object { get; } - - public string Text { get; } - } - - public bool ReadResponseAsString { get; set; } - - protected virtual async System.Threading.Tasks.Task> ReadObjectResponseAsync(System.Net.Http.HttpResponseMessage response, System.Collections.Generic.IReadOnlyDictionary> headers) - { - if (response == null || response.Content == null) - { - return new ObjectResponseResult(default(T), string.Empty); - } - - if (ReadResponseAsString) - { - var responseText = await response.Content.ReadAsStringAsync().ConfigureAwait(false); - try - { - var typedBody = Newtonsoft.Json.JsonConvert.DeserializeObject(responseText, JsonSerializerSettings); - return new ObjectResponseResult(typedBody, responseText); - } - catch (Newtonsoft.Json.JsonException exception) - { - var message = "Could not deserialize the response body string as " + typeof(T).FullName + "."; - throw new ApiException(message, (int)response.StatusCode, responseText, headers, exception); - } - } - else - { - try - { - using (var responseStream = await response.Content.ReadAsStreamAsync().ConfigureAwait(false)) - using (var streamReader = new System.IO.StreamReader(responseStream)) - using (var jsonTextReader = new Newtonsoft.Json.JsonTextReader(streamReader)) - { - var serializer = Newtonsoft.Json.JsonSerializer.Create(JsonSerializerSettings); - var typedBody = serializer.Deserialize(jsonTextReader); - return new ObjectResponseResult(typedBody, string.Empty); - } - } - catch (Newtonsoft.Json.JsonException exception) 
- { - var message = "Could not deserialize the response body stream as " + typeof(T).FullName + "."; - throw new ApiException(message, (int)response.StatusCode, string.Empty, headers, exception); - } - } - } - - private string ConvertToString(object value, System.Globalization.CultureInfo cultureInfo) - { - if (value is System.Enum) - { - string name = System.Enum.GetName(value.GetType(), value); - if (name != null) - { - var field = System.Reflection.IntrospectionExtensions.GetTypeInfo(value.GetType()).GetDeclaredField(name); - if (field != null) - { - var attribute = System.Reflection.CustomAttributeExtensions.GetCustomAttribute(field, typeof(System.Runtime.Serialization.EnumMemberAttribute)) - as System.Runtime.Serialization.EnumMemberAttribute; - if (attribute != null) - { - return attribute.Value != null ? attribute.Value : name; - } - } - } - } - else if (value is bool) { - return System.Convert.ToString(value, cultureInfo).ToLowerInvariant(); - } - else if (value is byte[]) - { - return System.Convert.ToBase64String((byte[]) value); - } - else if (value != null && value.GetType().IsArray) - { - var array = System.Linq.Enumerable.OfType((System.Array) value); - return string.Join(",", System.Linq.Enumerable.Select(array, o => ConvertToString(o, cultureInfo))); - } - - return System.Convert.ToString(value, cultureInfo); - } - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class PermissionsSubject - { - [Newtonsoft.Json.JsonProperty("kind", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Kind { get; set; } - - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class QueuePermissions - { - [Newtonsoft.Json.JsonProperty("subjects", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Subjects { get; set; } - - [Newtonsoft.Json.JsonProperty("verbs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Verbs { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiBatchQueueCreateResponse - { - [Newtonsoft.Json.JsonProperty("failedQueues", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection FailedQueues { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiBatchQueueUpdateResponse - { - [Newtonsoft.Json.JsonProperty("failedQueues", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection FailedQueues { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiCancellationResult - { - [Newtonsoft.Json.JsonProperty("cancelledIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = 
Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection CancelledIds { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public enum ApiCause - { - [System.Runtime.Serialization.EnumMember(Value = @"Error")] - Error = 0, - - [System.Runtime.Serialization.EnumMember(Value = @"Evicted")] - Evicted = 1, - - [System.Runtime.Serialization.EnumMember(Value = @"OOM")] - OOM = 2, - - [System.Runtime.Serialization.EnumMember(Value = @"DeadlineExceeded")] - DeadlineExceeded = 3, - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiContainerStatus - { - [Newtonsoft.Json.JsonProperty("cause", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public ApiCause? Cause { get; set; } - - [Newtonsoft.Json.JsonProperty("exitCode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? ExitCode { get; set; } - - [Newtonsoft.Json.JsonProperty("message", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Message { get; set; } - - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiEndMarker - { - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiEventMessage - { - [Newtonsoft.Json.JsonProperty("cancelled", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobCancelledEvent Cancelled { get; set; } - - [Newtonsoft.Json.JsonProperty("cancelling", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobCancellingEvent Cancelling { get; set; } - - [Newtonsoft.Json.JsonProperty("duplicateFound", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobDuplicateFoundEvent DuplicateFound { get; set; } - - [Newtonsoft.Json.JsonProperty("failed", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobFailedEvent Failed { get; set; } - - [Newtonsoft.Json.JsonProperty("failedCompressed", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobFailedEventCompressed FailedCompressed { get; set; } - - [Newtonsoft.Json.JsonProperty("ingressInfo", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobIngressInfoEvent IngressInfo { get; set; } - - [Newtonsoft.Json.JsonProperty("leaseExpired", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobLeaseExpiredEvent 
LeaseExpired { get; set; } - - [Newtonsoft.Json.JsonProperty("leaseReturned", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobLeaseReturnedEvent LeaseReturned { get; set; } - - [Newtonsoft.Json.JsonProperty("leased", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobLeasedEvent Leased { get; set; } - - [Newtonsoft.Json.JsonProperty("pending", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobPendingEvent Pending { get; set; } - - [Newtonsoft.Json.JsonProperty("preempted", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobPreemptedEvent Preempted { get; set; } - - [Newtonsoft.Json.JsonProperty("preempting", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobPreemptingEvent Preempting { get; set; } - - [Newtonsoft.Json.JsonProperty("queued", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobQueuedEvent Queued { get; set; } - - [Newtonsoft.Json.JsonProperty("reprioritized", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobReprioritizedEvent Reprioritized { get; set; } - - [Newtonsoft.Json.JsonProperty("reprioritizing", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobReprioritizingEvent Reprioritizing { get; set; } - - [Newtonsoft.Json.JsonProperty("running", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobRunningEvent Running { get; set; } - - [Newtonsoft.Json.JsonProperty("submitted", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobSubmittedEvent Submitted { get; set; } - - [Newtonsoft.Json.JsonProperty("succeeded", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobSucceededEvent Succeeded { get; set; } - - [Newtonsoft.Json.JsonProperty("terminated", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobTerminatedEvent Terminated { get; set; } - - [Newtonsoft.Json.JsonProperty("unableToSchedule", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobUnableToScheduleEvent UnableToSchedule { get; set; } - - [Newtonsoft.Json.JsonProperty("updated", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobUpdatedEvent Updated { get; set; } - - [Newtonsoft.Json.JsonProperty("utilisation", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobUtilisationEvent Utilisation { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiEventStreamMessage - { - [Newtonsoft.Json.JsonProperty("id", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Id { get; set; } - - 
[Newtonsoft.Json.JsonProperty("message", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiEventMessage Message { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiIngressConfig - { - [Newtonsoft.Json.JsonProperty("annotations", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Annotations { get; set; } - - [Newtonsoft.Json.JsonProperty("certName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string CertName { get; set; } - - [Newtonsoft.Json.JsonProperty("ports", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ports { get; set; } - - [Newtonsoft.Json.JsonProperty("tlsEnabled", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? TlsEnabled { get; set; } - - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public ApiIngressType? Type { get; set; } - - [Newtonsoft.Json.JsonProperty("useClusterIP", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? UseClusterIP { get; set; } - - - } - - /// Ingress type is being kept here to maintain backwards compatibility for a while. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public enum ApiIngressType - { - [System.Runtime.Serialization.EnumMember(Value = @"Ingress")] - Ingress = 0, - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJob - { - [Newtonsoft.Json.JsonProperty("annotations", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Annotations { get; set; } - - [Newtonsoft.Json.JsonProperty("clientId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClientId { get; set; } - - [Newtonsoft.Json.JsonProperty("compressedQueueOwnershipUserGroups", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public byte[] CompressedQueueOwnershipUserGroups { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("id", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Id { get; set; } - - /// Services can be provided either as Armada-specific config objects or as proper k8s objects. - /// These options are exclusive, i.e., if either ingress or services is provided, - /// then neither of k8s_ingress or k8s_service can be provided, and vice versa. 
- [Newtonsoft.Json.JsonProperty("ingress", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ingress { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("k8sIngress", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection K8sIngress { get; set; } - - [Newtonsoft.Json.JsonProperty("k8sService", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection K8sService { get; set; } - - [Newtonsoft.Json.JsonProperty("labels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Labels { get; set; } - - [Newtonsoft.Json.JsonProperty("namespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Namespace { get; set; } - - [Newtonsoft.Json.JsonProperty("owner", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Owner { get; set; } - - [Newtonsoft.Json.JsonProperty("podSpec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PodSpec PodSpec { get; set; } - - [Newtonsoft.Json.JsonProperty("podSpecs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection PodSpecs { get; set; } - - [Newtonsoft.Json.JsonProperty("priority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public double? Priority { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("queueOwnershipUserGroups", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection QueueOwnershipUserGroups { get; set; } - - /// Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - [Newtonsoft.Json.JsonProperty("queueTtlSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string QueueTtlSeconds { get; set; } - - [Newtonsoft.Json.JsonProperty("requiredNodeLabels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary RequiredNodeLabels { get; set; } - - /// Indicates which scheduler should manage this job. - /// If empty, the default scheduler is used. 
- [Newtonsoft.Json.JsonProperty("scheduler", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Scheduler { get; set; } - - /// max( - /// - /// sum across all containers, - /// max over all init containers, - /// - /// ) - /// - /// This is because containers run in parallel, whereas initContainers run serially. - /// This field is populated automatically at submission. - /// Submitting a job with this field already populated results in an error. - [Newtonsoft.Json.JsonProperty("schedulingResourceRequirements", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceRequirements SchedulingResourceRequirements { get; set; } - - [Newtonsoft.Json.JsonProperty("services", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Services { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobCancelRequest - { - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobIds { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobCancelledEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - [Newtonsoft.Json.JsonProperty("requestor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Requestor { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobCancellingEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - [Newtonsoft.Json.JsonProperty("requestor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Requestor { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobDetails - { - [Newtonsoft.Json.JsonProperty("cancelReason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string CancelReason { get; set; } - - [Newtonsoft.Json.JsonProperty("cancelTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
CancelTs { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobRuns", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobRuns { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSpec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJob JobSpec { get; set; } - - [Newtonsoft.Json.JsonProperty("jobset", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Jobset { get; set; } - - [Newtonsoft.Json.JsonProperty("lastTransitionTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? LastTransitionTs { get; set; } - - [Newtonsoft.Json.JsonProperty("latestRunId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string LatestRunId { get; set; } - - [Newtonsoft.Json.JsonProperty("namespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Namespace { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("state", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public ApiJobState? State { get; set; } - - [Newtonsoft.Json.JsonProperty("submittedTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? SubmittedTs { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobDetailsRequest - { - [Newtonsoft.Json.JsonProperty("expandJobRun", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ExpandJobRun { get; set; } - - [Newtonsoft.Json.JsonProperty("expandJobSpec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? 
ExpandJobSpec { get; set; } - - [Newtonsoft.Json.JsonProperty("jobIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobIds { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobDetailsResponse - { - [Newtonsoft.Json.JsonProperty("jobDetails", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary JobDetails { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobDuplicateFoundEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("originalJobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string OriginalJobId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobFailedEvent - { - [Newtonsoft.Json.JsonProperty("cause", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public ApiCause? Cause { get; set; } - - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("containerStatuses", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection ContainerStatuses { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("exitCodes", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary ExitCodes { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobFailedEventCompressed - { - [Newtonsoft.Json.JsonProperty("event", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public byte[] Event { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobIngressInfoEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("ingressAddresses", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary IngressAddresses { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobLeaseExpiredEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobLeaseReturnedEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - [Newtonsoft.Json.JsonProperty("runAttempted", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? RunAttempted { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobLeasedEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobPendingEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobPreemptRequest - { - [Newtonsoft.Json.JsonProperty("jobIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobIds { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobPreemptedEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("preemptiveJobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PreemptiveJobId { get; set; } - - [Newtonsoft.Json.JsonProperty("preemptiveRunId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PreemptiveRunId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("runId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string RunId { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobPreemptingEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("requestor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Requestor { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobQueuedEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobReprioritizeRequest - { - [Newtonsoft.Json.JsonProperty("jobIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobIds { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("newPriority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public double? NewPriority { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobReprioritizeResponse - { - [Newtonsoft.Json.JsonProperty("reprioritizationResults", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary ReprioritizationResults { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobReprioritizedEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("newPriority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public double? 
NewPriority { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("requestor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Requestor { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobReprioritizingEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("newPriority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public double? NewPriority { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("requestor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Requestor { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobRunDetails - { - [Newtonsoft.Json.JsonProperty("cluster", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Cluster { get; set; } - - [Newtonsoft.Json.JsonProperty("finishedTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? FinishedTs { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("leasedTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? LeasedTs { get; set; } - - [Newtonsoft.Json.JsonProperty("node", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Node { get; set; } - - [Newtonsoft.Json.JsonProperty("pendingTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? PendingTs { get; set; } - - [Newtonsoft.Json.JsonProperty("runId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string RunId { get; set; } - - [Newtonsoft.Json.JsonProperty("startedTs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
StartedTs { get; set; } - - [Newtonsoft.Json.JsonProperty("state", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public ApiJobRunState? State { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobRunDetailsRequest - { - [Newtonsoft.Json.JsonProperty("runIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection RunIds { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobRunDetailsResponse - { - [Newtonsoft.Json.JsonProperty("jobRunDetails", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary JobRunDetails { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public enum ApiJobRunState - { - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_UNKNOWN")] - RUN_STATE_UNKNOWN = 0, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_LEASED")] - RUN_STATE_LEASED = 1, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_PENDING")] - RUN_STATE_PENDING = 2, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_RUNNING")] - RUN_STATE_RUNNING = 3, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_SUCCEEDED")] - RUN_STATE_SUCCEEDED = 4, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_FAILED")] - RUN_STATE_FAILED = 5, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_PREEMPTED")] - RUN_STATE_PREEMPTED = 6, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_CANCELLED")] - RUN_STATE_CANCELLED = 7, - - [System.Runtime.Serialization.EnumMember(Value = @"RUN_STATE_LEASE_EXPIRED")] - RUN_STATE_LEASE_EXPIRED = 8, - - [System.Runtime.Serialization.EnumMember(Value = @"RUNS_STATE_LEASE_RETURNED")] - RUNS_STATE_LEASE_RETURNED = 9, - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobRunningEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSetCancelRequest - { - [Newtonsoft.Json.JsonProperty("filter", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJobSetFilter Filter { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSetFilter - { - [Newtonsoft.Json.JsonProperty("states", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore, ItemConverterType = typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public System.Collections.Generic.ICollection States { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSetRequest - { - [Newtonsoft.Json.JsonProperty("errorIfMissing", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ErrorIfMissing { get; set; } - - [Newtonsoft.Json.JsonProperty("forceLegacy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? 
ForceLegacy { get; set; } - - [Newtonsoft.Json.JsonProperty("forceNew", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ForceNew { get; set; } - - [Newtonsoft.Json.JsonProperty("fromMessageId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FromMessageId { get; set; } - - [Newtonsoft.Json.JsonProperty("id", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Id { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("watch", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Watch { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public enum ApiJobState - { - [System.Runtime.Serialization.EnumMember(Value = @"QUEUED")] - QUEUED = 0, - - [System.Runtime.Serialization.EnumMember(Value = @"PENDING")] - PENDING = 1, - - [System.Runtime.Serialization.EnumMember(Value = @"RUNNING")] - RUNNING = 2, - - [System.Runtime.Serialization.EnumMember(Value = @"SUCCEEDED")] - SUCCEEDED = 3, - - [System.Runtime.Serialization.EnumMember(Value = @"FAILED")] - FAILED = 4, - - [System.Runtime.Serialization.EnumMember(Value = @"UNKNOWN")] - UNKNOWN = 5, - - [System.Runtime.Serialization.EnumMember(Value = @"SUBMITTED")] - SUBMITTED = 6, - - [System.Runtime.Serialization.EnumMember(Value = @"LEASED")] - LEASED = 7, - - [System.Runtime.Serialization.EnumMember(Value = @"PREEMPTED")] - PREEMPTED = 8, - - [System.Runtime.Serialization.EnumMember(Value = @"CANCELLED")] - CANCELLED = 9, - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobStatusRequest - { - [Newtonsoft.Json.JsonProperty("jobIds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobIds { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobStatusResponse - { - [Newtonsoft.Json.JsonProperty("jobStates", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary JobStates { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSubmitRequest - { - [Newtonsoft.Json.JsonProperty("jobRequestItems", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobRequestItems { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json 
v12.0.0.0)")] - public partial class ApiJobSubmitRequestItem - { - [Newtonsoft.Json.JsonProperty("annotations", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Annotations { get; set; } - - [Newtonsoft.Json.JsonProperty("clientId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClientId { get; set; } - - [Newtonsoft.Json.JsonProperty("ingress", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ingress { get; set; } - - [Newtonsoft.Json.JsonProperty("labels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Labels { get; set; } - - [Newtonsoft.Json.JsonProperty("namespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Namespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podSpec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PodSpec PodSpec { get; set; } - - [Newtonsoft.Json.JsonProperty("podSpecs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection PodSpecs { get; set; } - - [Newtonsoft.Json.JsonProperty("priority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public double? Priority { get; set; } - - /// Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - [Newtonsoft.Json.JsonProperty("queueTtlSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string QueueTtlSeconds { get; set; } - - [Newtonsoft.Json.JsonProperty("requiredNodeLabels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary RequiredNodeLabels { get; set; } - - /// Indicates which scheduler should manage this job. - /// If empty, the default scheduler is used. 
- [Newtonsoft.Json.JsonProperty("scheduler", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Scheduler { get; set; } - - [Newtonsoft.Json.JsonProperty("services", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Services { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSubmitResponse - { - [Newtonsoft.Json.JsonProperty("jobResponseItems", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection JobResponseItems { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSubmitResponseItem - { - [Newtonsoft.Json.JsonProperty("error", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Error { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSubmittedEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("job", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJob Job { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobSucceededEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobTerminatedEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? 
PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobUnableToScheduleEvent - { - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobUpdatedEvent - { - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? 
Created { get; set; } - - [Newtonsoft.Json.JsonProperty("job", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiJob Job { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("requestor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Requestor { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiJobUtilisationEvent - { - [Newtonsoft.Json.JsonProperty("MaxResourcesForPeriod", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary MaxResourcesForPeriod { get; set; } - - [Newtonsoft.Json.JsonProperty("clusterId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterId { get; set; } - - [Newtonsoft.Json.JsonProperty("created", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Created { get; set; } - - [Newtonsoft.Json.JsonProperty("jobId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobId { get; set; } - - [Newtonsoft.Json.JsonProperty("jobSetId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string JobSetId { get; set; } - - [Newtonsoft.Json.JsonProperty("kubernetesId", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string KubernetesId { get; set; } - - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - [Newtonsoft.Json.JsonProperty("podName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodName { get; set; } - - [Newtonsoft.Json.JsonProperty("podNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PodNamespace { get; set; } - - [Newtonsoft.Json.JsonProperty("podNumber", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? 
PodNumber { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Queue { get; set; } - - [Newtonsoft.Json.JsonProperty("totalCumulativeUsage", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary TotalCumulativeUsage { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiPriorityClassPoolResourceLimits - { - [Newtonsoft.Json.JsonProperty("maximumResourceFraction", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary MaximumResourceFraction { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiPriorityClassResourceLimits - { - /// Limits resources assigned to jobs of this priority class. - /// Specifically, jobs of this priority class are only scheduled if doing so does not exceed this limit. - [Newtonsoft.Json.JsonProperty("maximumResourceFraction", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary MaximumResourceFraction { get; set; } - - /// Per-pool override of maximum_resource_fraction. - /// If missing for a particular pool, maximum_resource_fraction is used instead for that pool. - [Newtonsoft.Json.JsonProperty("maximumResourceFractionByPool", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary MaximumResourceFractionByPool { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiQueue - { - [Newtonsoft.Json.JsonProperty("groupOwners", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection GroupOwners { get; set; } - - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - [Newtonsoft.Json.JsonProperty("permissions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Permissions { get; set; } - - [Newtonsoft.Json.JsonProperty("priorityFactor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public double? PriorityFactor { get; set; } - - [Newtonsoft.Json.JsonProperty("resourceLimits", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary ResourceLimits { get; set; } - - /// Map from priority class name to resource limit overrides for this queue and priority class. - /// If provided for a priority class, global limits for that priority class do not apply to this queue. 
- [Newtonsoft.Json.JsonProperty("resourceLimitsByPriorityClassName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary ResourceLimitsByPriorityClassName { get; set; } - - [Newtonsoft.Json.JsonProperty("userOwners", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection UserOwners { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiQueueCreateResponse - { - [Newtonsoft.Json.JsonProperty("error", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Error { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiQueue Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiQueueList - { - [Newtonsoft.Json.JsonProperty("queues", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Queues { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiQueueUpdateResponse - { - [Newtonsoft.Json.JsonProperty("error", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Error { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiQueue Queue { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiServiceConfig - { - [Newtonsoft.Json.JsonProperty("ports", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ports { get; set; } - - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public ApiServiceType? 
Type { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public enum ApiServiceType - { - [System.Runtime.Serialization.EnumMember(Value = @"NodePort")] - NodePort = 0, - - [System.Runtime.Serialization.EnumMember(Value = @"Headless")] - Headless = 1, - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ApiStreamingQueueMessage - { - [Newtonsoft.Json.JsonProperty("end", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiEndMarker End { get; set; } - - [Newtonsoft.Json.JsonProperty("queue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiQueue Queue { get; set; } - - - } - - /// +protobuf=true - /// +protobuf.options.(gogoproto.goproto_stringer)=false - /// +k8s:openapi-gen=true - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class IntstrIntOrString - { - [Newtonsoft.Json.JsonProperty("IntVal", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? IntVal { get; set; } - - [Newtonsoft.Json.JsonProperty("StrVal", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string StrVal { get; set; } - - [Newtonsoft.Json.JsonProperty("Type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? Type { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class ProtobufAny - { - [Newtonsoft.Json.JsonProperty("typeUrl", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TypeUrl { get; set; } - - [Newtonsoft.Json.JsonProperty("value", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public byte[] Value { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class RuntimeError - { - [Newtonsoft.Json.JsonProperty("code", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? 
Code { get; set; } - - [Newtonsoft.Json.JsonProperty("details", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Details { get; set; } - - [Newtonsoft.Json.JsonProperty("error", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Error { get; set; } - - [Newtonsoft.Json.JsonProperty("message", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Message { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class RuntimeStreamError - { - [Newtonsoft.Json.JsonProperty("details", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Details { get; set; } - - [Newtonsoft.Json.JsonProperty("grpcCode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? GrpcCode { get; set; } - - [Newtonsoft.Json.JsonProperty("httpCode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? HttpCode { get; set; } - - [Newtonsoft.Json.JsonProperty("httpStatus", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string HttpStatus { get; set; } - - [Newtonsoft.Json.JsonProperty("message", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Message { get; set; } - - - } - - /// An AWS EBS disk must exist before mounting to a container. The disk - /// must also be in the same AWS zone as the kubelet. An AWS EBS disk - /// can only be mounted as read/write once. AWS EBS volumes support - /// ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1AWSElasticBlockStoreVolumeSource - { - /// Filesystem type of the volume that you want to mount. - /// Tip: Ensure that the filesystem type is supported by the host operating system. - /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - /// TODO: how do we prevent errors in the filesystem from compromising the machine - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// The partition in the volume that you want to mount. - /// If omitted, the default is to mount by volume name. - /// Examples: For volume /dev/sda1, you specify the partition as "1". - /// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - /// +optional - [Newtonsoft.Json.JsonProperty("partition", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Partition { get; set; } - - /// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - /// If omitted, the default is "false". 
- /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - [Newtonsoft.Json.JsonProperty("volumeID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumeID { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Affinity - { - [Newtonsoft.Json.JsonProperty("nodeAffinity", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1NodeAffinity NodeAffinity { get; set; } - - [Newtonsoft.Json.JsonProperty("podAffinity", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PodAffinity PodAffinity { get; set; } - - [Newtonsoft.Json.JsonProperty("podAntiAffinity", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PodAntiAffinity PodAntiAffinity { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1AzureDiskVolumeSource - { - [Newtonsoft.Json.JsonProperty("cachingMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string CachingMode { get; set; } - - /// The Name of the data disk in the blob storage - [Newtonsoft.Json.JsonProperty("diskName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string DiskName { get; set; } - - /// The URI the data disk in the blob storage - [Newtonsoft.Json.JsonProperty("diskURI", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string DiskURI { get; set; } - - /// Filesystem type to mount. - /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - [Newtonsoft.Json.JsonProperty("kind", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Kind { get; set; } - - /// Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1AzureFileVolumeSource - { - /// Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. 
- /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// the name of secret that contains Azure Storage Account Name and Key - [Newtonsoft.Json.JsonProperty("secretName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SecretName { get; set; } - - /// Share Name - [Newtonsoft.Json.JsonProperty("shareName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ShareName { get; set; } - - - } - - /// Represents a source location of a volume to mount, managed by an external CSI driver - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1CSIVolumeSource - { - /// Driver is the name of the CSI driver that handles this volume. - /// Consult with your admin for the correct name as registered in the cluster. - [Newtonsoft.Json.JsonProperty("driver", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Driver { get; set; } - - /// Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". - /// If not provided, the empty value is passed to the associated CSI driver - /// which will determine the default filesystem to apply. - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - [Newtonsoft.Json.JsonProperty("nodePublishSecretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference NodePublishSecretRef { get; set; } - - /// Specifies a read-only configuration for the volume. - /// Defaults to false (read/write). - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// VolumeAttributes stores driver-specific properties that are passed to the CSI - /// driver. Consult your driver's documentation for supported values. - /// +optional - [Newtonsoft.Json.JsonProperty("volumeAttributes", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary VolumeAttributes { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Capabilities - { - /// Added capabilities - /// +optional - [Newtonsoft.Json.JsonProperty("add", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Add { get; set; } - - /// Removed capabilities - /// +optional - [Newtonsoft.Json.JsonProperty("drop", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Drop { get; set; } - - - } - - /// Represents a Ceph Filesystem mount that lasts the lifetime of a pod - /// Cephfs volumes do not support ownership management or SELinux relabeling. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1CephFSVolumeSource - { - /// Required: Monitors is a collection of Ceph monitors - /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - [Newtonsoft.Json.JsonProperty("monitors", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Monitors { get; set; } - - /// Optional: Used as the mounted root, rather than the full Ceph tree, default is / - /// +optional - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - /// Optional: Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("secretFile", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SecretFile { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - /// Optional: User is the rados user name, default is admin - /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("user", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string User { get; set; } - - - } - - /// A Cinder volume must exist before mounting to a container. - /// The volume must also be in the same region as the kubelet. - /// Cinder volumes support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1CinderVolumeSource - { - /// Filesystem type to mount. - /// Must be a filesystem type supported by the host operating system. - /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Optional: Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? 
ReadOnly { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - /// volume id used to identify the volume in cinder. - /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md - [Newtonsoft.Json.JsonProperty("volumeID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumeID { get; set; } - - - } - - /// ClientIPConfig represents the configurations of Client IP based session affinity. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ClientIPConfig - { - [Newtonsoft.Json.JsonProperty("timeoutSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? TimeoutSeconds { get; set; } - - - } - - /// // other fields - /// } - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Condition - { - [Newtonsoft.Json.JsonProperty("lastTransitionTime", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? LastTransitionTime { get; set; } - - [Newtonsoft.Json.JsonProperty("message", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Message { get; set; } - - [Newtonsoft.Json.JsonProperty("observedGeneration", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ObservedGeneration { get; set; } - - [Newtonsoft.Json.JsonProperty("reason", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Reason { get; set; } - - [Newtonsoft.Json.JsonProperty("status", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Status { get; set; } - - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Type { get; set; } - - - } - - /// The contents of the target ConfigMap's Data field will represent the - /// key-value pairs as environment variables. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ConfigMapEnvSource - { - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the ConfigMap must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - - } - - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ConfigMapKeySelector - { - /// The key to select. 
- [Newtonsoft.Json.JsonProperty("key", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Key { get; set; } - - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the ConfigMap or its key must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - - } - - /// The contents of the target ConfigMap's Data field will be presented in a - /// projected volume as files using the keys in the Data field as the file names, - /// unless the items element is populated with specific mappings of keys to paths. - /// Note that this is identical to a configmap volume source without the default - /// mode. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ConfigMapProjection - { - /// If unspecified, each key-value pair in the Data field of the referenced - /// ConfigMap will be projected into the volume as a file whose name is the - /// key and content is the value. If specified, the listed keys will be - /// projected into the specified paths, and unlisted keys will not be - /// present. If a key is specified which is not present in the ConfigMap, - /// the volume setup will error unless it is marked optional. Paths must be - /// relative and may not contain the '..' path or start with '..'. - /// +optional - [Newtonsoft.Json.JsonProperty("items", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Items { get; set; } - - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the ConfigMap or its keys must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - - } - - /// The contents of the target ConfigMap's Data field will be presented in a - /// volume as files using the keys in the Data field as the file names, unless - /// the items element is populated with specific mappings of keys to paths. - /// ConfigMap volumes support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ConfigMapVolumeSource - { - /// Optional: mode bits used to set permissions on created files by default. - /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - /// Defaults to 0644. 
- /// Directories within the path are not affected by this setting. - /// This might be in conflict with other options that affect the file - /// mode, like fsGroup, and the result can be other mode bits set. - /// +optional - [Newtonsoft.Json.JsonProperty("defaultMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? DefaultMode { get; set; } - - /// If unspecified, each key-value pair in the Data field of the referenced - /// ConfigMap will be projected into the volume as a file whose name is the - /// key and content is the value. If specified, the listed keys will be - /// projected into the specified paths, and unlisted keys will not be - /// present. If a key is specified which is not present in the ConfigMap, - /// the volume setup will error unless it is marked optional. Paths must be - /// relative and may not contain the '..' path or start with '..'. - /// +optional - [Newtonsoft.Json.JsonProperty("items", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Items { get; set; } - - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the ConfigMap or its keys must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Container - { - /// Arguments to the entrypoint. - /// The docker image's CMD is used if this is not provided. - /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - /// of whether the variable exists or not. Cannot be updated. - /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - /// +optional - [Newtonsoft.Json.JsonProperty("args", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Args { get; set; } - - /// Entrypoint array. Not executed within a shell. - /// The docker image's ENTRYPOINT is used if this is not provided. - /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - /// of whether the variable exists or not. Cannot be updated. 
- /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - /// +optional - [Newtonsoft.Json.JsonProperty("command", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Command { get; set; } - - /// List of environment variables to set in the container. - /// Cannot be updated. - /// +optional - /// +patchMergeKey=name - /// +patchStrategy=merge - [Newtonsoft.Json.JsonProperty("env", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Env { get; set; } - - /// List of sources to populate environment variables in the container. - /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys - /// will be reported as an event when the container is starting. When a key exists in multiple - /// sources, the value associated with the last source will take precedence. - /// Values defined by an Env with a duplicate key will take precedence. - /// Cannot be updated. - /// +optional - [Newtonsoft.Json.JsonProperty("envFrom", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection EnvFrom { get; set; } - - /// Docker image name. - /// More info: https://kubernetes.io/docs/concepts/containers/images - /// This field is optional to allow higher level config management to default or override - /// container images in workload controllers like Deployments and StatefulSets. - /// +optional - [Newtonsoft.Json.JsonProperty("image", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Image { get; set; } - - [Newtonsoft.Json.JsonProperty("imagePullPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ImagePullPolicy { get; set; } - - [Newtonsoft.Json.JsonProperty("lifecycle", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Lifecycle Lifecycle { get; set; } - - [Newtonsoft.Json.JsonProperty("livenessProbe", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Probe LivenessProbe { get; set; } - - /// Name of the container specified as a DNS_LABEL. - /// Each container in a pod must have a unique name (DNS_LABEL). - /// Cannot be updated. - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// List of ports to expose from the container. Exposing a port here gives - /// the system additional information about the network connections a - /// container uses, but is primarily informational. Not specifying a port here - /// DOES NOT prevent that port from being exposed. Any port which is - /// listening on the default "0.0.0.0" address inside a container will be - /// accessible from the network. - /// Cannot be updated. 
- /// +optional - /// +patchMergeKey=containerPort - /// +patchStrategy=merge - /// +listType=map - /// +listMapKey=containerPort - /// +listMapKey=protocol - [Newtonsoft.Json.JsonProperty("ports", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ports { get; set; } - - [Newtonsoft.Json.JsonProperty("readinessProbe", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Probe ReadinessProbe { get; set; } - - [Newtonsoft.Json.JsonProperty("resources", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceRequirements Resources { get; set; } - - [Newtonsoft.Json.JsonProperty("securityContext", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SecurityContext SecurityContext { get; set; } - - [Newtonsoft.Json.JsonProperty("startupProbe", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Probe StartupProbe { get; set; } - - /// Whether this container should allocate a buffer for stdin in the container runtime. If this - /// is not set, reads from stdin in the container will always result in EOF. - /// Default is false. - /// +optional - [Newtonsoft.Json.JsonProperty("stdin", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Stdin { get; set; } - - /// Whether the container runtime should close the stdin channel after it has been opened by - /// a single attach. When stdin is true the stdin stream will remain open across multiple attach - /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, - /// at which time stdin is closed and remains closed until the container is restarted. If this - /// flag is false, a container processes that reads from stdin will never receive an EOF. - /// Default is false - /// +optional - [Newtonsoft.Json.JsonProperty("stdinOnce", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? StdinOnce { get; set; } - - /// Optional: Path at which the file to which the container's termination message - /// will be written is mounted into the container's filesystem. - /// Message written is intended to be brief final status, such as an assertion failure message. - /// Will be truncated by the node if greater than 4096 bytes. The total message length across - /// all containers will be limited to 12kb. - /// Defaults to /dev/termination-log. - /// Cannot be updated. - /// +optional - [Newtonsoft.Json.JsonProperty("terminationMessagePath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TerminationMessagePath { get; set; } - - [Newtonsoft.Json.JsonProperty("terminationMessagePolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TerminationMessagePolicy { get; set; } - - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - /// Default is false. 
- /// +optional - [Newtonsoft.Json.JsonProperty("tty", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Tty { get; set; } - - /// volumeDevices is the list of block devices to be used by the container. - /// +patchMergeKey=devicePath - /// +patchStrategy=merge - /// +optional - [Newtonsoft.Json.JsonProperty("volumeDevices", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection VolumeDevices { get; set; } - - /// Pod volumes to mount into the container's filesystem. - /// Cannot be updated. - /// +optional - /// +patchMergeKey=mountPath - /// +patchStrategy=merge - [Newtonsoft.Json.JsonProperty("volumeMounts", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection VolumeMounts { get; set; } - - /// Container's working directory. - /// If not specified, the container runtime's default will be used, which - /// might be configured in the container image. - /// Cannot be updated. - /// +optional - [Newtonsoft.Json.JsonProperty("workingDir", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string WorkingDir { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ContainerPort - { - /// Number of port to expose on the pod's IP address. - /// This must be a valid port number, 0 < x < 65536. - [Newtonsoft.Json.JsonProperty("containerPort", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? ContainerPort { get; set; } - - /// What host IP to bind the external port to. - /// +optional - [Newtonsoft.Json.JsonProperty("hostIP", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string HostIP { get; set; } - - /// Number of port to expose on the host. - /// If specified, this must be a valid port number, 0 < x < 65536. - /// If HostNetwork is specified, this must match ContainerPort. - /// Most containers do not need this. - /// +optional - [Newtonsoft.Json.JsonProperty("hostPort", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? HostPort { get; set; } - - /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - /// named port in a pod must have a unique name. Name for the port that can be - /// referred to by services. - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - [Newtonsoft.Json.JsonProperty("protocol", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Protocol { get; set; } - - - } - - /// Note that this is identical to a downwardAPI volume source without the default - /// mode. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1DownwardAPIProjection - { - /// Items is a list of DownwardAPIVolume file - /// +optional - [Newtonsoft.Json.JsonProperty("items", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Items { get; set; } - - - } - - /// DownwardAPIVolumeFile represents information to create the file containing the pod field - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1DownwardAPIVolumeFile - { - [Newtonsoft.Json.JsonProperty("fieldRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ObjectFieldSelector FieldRef { get; set; } - - /// Optional: mode bits used to set permissions on this file, must be an octal value - /// between 0000 and 0777 or a decimal value between 0 and 511. - /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - /// If not specified, the volume defaultMode will be used. - /// This might be in conflict with other options that affect the file - /// mode, like fsGroup, and the result can be other mode bits set. - /// +optional - [Newtonsoft.Json.JsonProperty("mode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Mode { get; set; } - - /// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - [Newtonsoft.Json.JsonProperty("resourceFieldRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceFieldSelector ResourceFieldRef { get; set; } - - - } - - /// Downward API volumes support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1DownwardAPIVolumeSource - { - /// Optional: mode bits to use on created files by default. Must be a - /// Optional: mode bits used to set permissions on created files by default. - /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - /// Defaults to 0644. - /// Directories within the path are not affected by this setting. - /// This might be in conflict with other options that affect the file - /// mode, like fsGroup, and the result can be other mode bits set. - /// +optional - [Newtonsoft.Json.JsonProperty("defaultMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? DefaultMode { get; set; } - - /// Items is a list of downward API volume file - /// +optional - [Newtonsoft.Json.JsonProperty("items", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Items { get; set; } - - - } - - /// Empty directory volumes support ownership management and SELinux relabeling. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1EmptyDirVolumeSource - { - [Newtonsoft.Json.JsonProperty("medium", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Medium { get; set; } - - [Newtonsoft.Json.JsonProperty("sizeLimit", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SizeLimit { get; set; } - - - } - - /// EnvFromSource represents the source of a set of ConfigMaps - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1EnvFromSource - { - [Newtonsoft.Json.JsonProperty("configMapRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ConfigMapEnvSource ConfigMapRef { get; set; } - - /// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. - /// +optional - [Newtonsoft.Json.JsonProperty("prefix", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Prefix { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SecretEnvSource SecretRef { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1EnvVar - { - /// Name of the environment variable. Must be a C_IDENTIFIER. - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Variable references $(VAR_NAME) are expanded - /// using the previously defined environment variables in the container and - /// any service environment variables. If a variable cannot be resolved, - /// the reference in the input string will be unchanged. Double $$ are reduced - /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - /// Escaped references will never be expanded, regardless of whether the variable - /// exists or not. - /// Defaults to "". 
- /// +optional - [Newtonsoft.Json.JsonProperty("value", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Value { get; set; } - - [Newtonsoft.Json.JsonProperty("valueFrom", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1EnvVarSource ValueFrom { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1EnvVarSource - { - [Newtonsoft.Json.JsonProperty("configMapKeyRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ConfigMapKeySelector ConfigMapKeyRef { get; set; } - - [Newtonsoft.Json.JsonProperty("fieldRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ObjectFieldSelector FieldRef { get; set; } - - [Newtonsoft.Json.JsonProperty("resourceFieldRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceFieldSelector ResourceFieldRef { get; set; } - - [Newtonsoft.Json.JsonProperty("secretKeyRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SecretKeySelector SecretKeyRef { get; set; } - - - } - - /// An EphemeralContainer is a container that may be added temporarily to an existing pod for - /// user-initiated activities such as debugging. Ephemeral containers have no resource or - /// scheduling guarantees, and they will not be restarted when they exit or when a pod is - /// removed or restarted. If an ephemeral container causes a pod to exceed its resource - /// allocation, the pod may be evicted. - /// Ephemeral containers may not be added by directly updating the pod spec. They must be added - /// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec - /// once added. - /// This is an alpha feature enabled by the EphemeralContainers feature flag. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1EphemeralContainer - { - /// Arguments to the entrypoint. - /// The docker image's CMD is used if this is not provided. - /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - /// of whether the variable exists or not. Cannot be updated. - /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - /// +optional - [Newtonsoft.Json.JsonProperty("args", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Args { get; set; } - - /// Entrypoint array. Not executed within a shell. - /// The docker image's ENTRYPOINT is used if this is not provided. - /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - /// cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced - /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - /// of whether the variable exists or not. Cannot be updated. - /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - /// +optional - [Newtonsoft.Json.JsonProperty("command", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Command { get; set; } - - /// List of environment variables to set in the container. - /// Cannot be updated. - /// +optional - /// +patchMergeKey=name - /// +patchStrategy=merge - [Newtonsoft.Json.JsonProperty("env", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Env { get; set; } - - /// List of sources to populate environment variables in the container. - /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys - /// will be reported as an event when the container is starting. When a key exists in multiple - /// sources, the value associated with the last source will take precedence. - /// Values defined by an Env with a duplicate key will take precedence. - /// Cannot be updated. - /// +optional - [Newtonsoft.Json.JsonProperty("envFrom", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection EnvFrom { get; set; } - - /// Docker image name. - /// More info: https://kubernetes.io/docs/concepts/containers/images - [Newtonsoft.Json.JsonProperty("image", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Image { get; set; } - - [Newtonsoft.Json.JsonProperty("imagePullPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ImagePullPolicy { get; set; } - - [Newtonsoft.Json.JsonProperty("lifecycle", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Lifecycle Lifecycle { get; set; } - - [Newtonsoft.Json.JsonProperty("livenessProbe", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Probe LivenessProbe { get; set; } - - /// Name of the ephemeral container specified as a DNS_LABEL. - /// This name must be unique among all containers, init containers and ephemeral containers. - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Ports are not allowed for ephemeral containers. 
- [Newtonsoft.Json.JsonProperty("ports", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ports { get; set; } - - [Newtonsoft.Json.JsonProperty("readinessProbe", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Probe ReadinessProbe { get; set; } - - [Newtonsoft.Json.JsonProperty("resources", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceRequirements Resources { get; set; } - - [Newtonsoft.Json.JsonProperty("securityContext", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SecurityContext SecurityContext { get; set; } - - [Newtonsoft.Json.JsonProperty("startupProbe", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Probe StartupProbe { get; set; } - - /// Whether this container should allocate a buffer for stdin in the container runtime. If this - /// is not set, reads from stdin in the container will always result in EOF. - /// Default is false. - /// +optional - [Newtonsoft.Json.JsonProperty("stdin", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Stdin { get; set; } - - /// Whether the container runtime should close the stdin channel after it has been opened by - /// a single attach. When stdin is true the stdin stream will remain open across multiple attach - /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, - /// at which time stdin is closed and remains closed until the container is restarted. If this - /// flag is false, a container processes that reads from stdin will never receive an EOF. - /// Default is false - /// +optional - [Newtonsoft.Json.JsonProperty("stdinOnce", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? StdinOnce { get; set; } - - /// If set, the name of the container from PodSpec that this ephemeral container targets. - /// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - /// If not set then the ephemeral container is run in whatever namespaces are shared - /// for the pod. Note that the container runtime must support this feature. - /// +optional - [Newtonsoft.Json.JsonProperty("targetContainerName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TargetContainerName { get; set; } - - /// Optional: Path at which the file to which the container's termination message - /// will be written is mounted into the container's filesystem. - /// Message written is intended to be brief final status, such as an assertion failure message. - /// Will be truncated by the node if greater than 4096 bytes. The total message length across - /// all containers will be limited to 12kb. - /// Defaults to /dev/termination-log. - /// Cannot be updated. 
- /// +optional - [Newtonsoft.Json.JsonProperty("terminationMessagePath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TerminationMessagePath { get; set; } - - [Newtonsoft.Json.JsonProperty("terminationMessagePolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TerminationMessagePolicy { get; set; } - - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - /// Default is false. - /// +optional - [Newtonsoft.Json.JsonProperty("tty", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Tty { get; set; } - - /// volumeDevices is the list of block devices to be used by the container. - /// +patchMergeKey=devicePath - /// +patchStrategy=merge - /// +optional - [Newtonsoft.Json.JsonProperty("volumeDevices", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection VolumeDevices { get; set; } - - /// Pod volumes to mount into the container's filesystem. - /// Cannot be updated. - /// +optional - /// +patchMergeKey=mountPath - /// +patchStrategy=merge - [Newtonsoft.Json.JsonProperty("volumeMounts", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection VolumeMounts { get; set; } - - /// Container's working directory. - /// If not specified, the container runtime's default will be used, which - /// might be configured in the container image. - /// Cannot be updated. - /// +optional - [Newtonsoft.Json.JsonProperty("workingDir", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string WorkingDir { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1EphemeralVolumeSource - { - [Newtonsoft.Json.JsonProperty("volumeClaimTemplate", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PersistentVolumeClaimTemplate VolumeClaimTemplate { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ExecAction - { - /// Command is the command line to execute inside the container, the working directory for the - /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is - /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - /// a shell, you need to explicitly call out to that shell. - /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - /// +optional - [Newtonsoft.Json.JsonProperty("command", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Command { get; set; } - - - } - - /// Fibre Channel volumes can only be mounted as read/write once. - /// Fibre Channel volumes support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1FCVolumeSource - { - /// Filesystem type to mount. 
- /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// TODO: how do we prevent errors in the filesystem from compromising the machine - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Optional: FC target lun number - /// +optional - [Newtonsoft.Json.JsonProperty("lun", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Lun { get; set; } - - /// Optional: Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// Optional: FC target worldwide names (WWNs) - /// +optional - [Newtonsoft.Json.JsonProperty("targetWWNs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection TargetWWNs { get; set; } - - /// Optional: FC volume world wide identifiers (wwids) - /// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - /// +optional - [Newtonsoft.Json.JsonProperty("wwids", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Wwids { get; set; } - - - } - - /// Each key is either a '.' representing the field itself, and will always map to an empty set, - /// or a string representing a sub-field or item. The string will follow one of these four formats: - /// 'f:<name>', where <name> is the name of a field in a struct, or key in a map - /// 'v:<value>', where <value> is the exact json formatted value of a list item - /// 'i:<index>', where <index> is position of a item in a list - /// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values - /// If a key maps to an empty Fields value, the field that key represents is part of the set. - /// - /// The exact format is defined in sigs.k8s.io/structured-merge-diff - /// +protobuf.options.(gogoproto.goproto_stringer)=false - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1FieldsV1 - { - - } - - /// FlexVolume represents a generic volume resource that is - /// provisioned/attached using an exec based plugin. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1FlexVolumeSource - { - /// Driver is the name of the driver to use for this volume. - [Newtonsoft.Json.JsonProperty("driver", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Driver { get; set; } - - /// Filesystem type to mount. - /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Optional: Extra command options if any. 
- /// +optional - [Newtonsoft.Json.JsonProperty("options", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Options { get; set; } - - /// Optional: Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - - } - - /// One and only one of datasetName and datasetUUID should be set. - /// Flocker volumes do not support ownership management or SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1FlockerVolumeSource - { - /// Name of the dataset stored as metadata -> name on the dataset for Flocker - /// should be considered as deprecated - /// +optional - [Newtonsoft.Json.JsonProperty("datasetName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string DatasetName { get; set; } - - /// UUID of the dataset. This is unique identifier of a Flocker dataset - /// +optional - [Newtonsoft.Json.JsonProperty("datasetUUID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string DatasetUUID { get; set; } - - - } - - /// A GCE PD must exist before mounting to a container. The disk must - /// also be in the same GCE project and zone as the kubelet. A GCE PD - /// can only be mounted as read/write once or read-only many times. GCE - /// PDs support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1GCEPersistentDiskVolumeSource - { - /// Filesystem type of the volume that you want to mount. - /// Tip: Ensure that the filesystem type is supported by the host operating system. - /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - /// TODO: how do we prevent errors in the filesystem from compromising the machine - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// The partition in the volume that you want to mount. - /// If omitted, the default is to mount by volume name. - /// Examples: For volume /dev/sda1, you specify the partition as "1". - /// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - /// +optional - [Newtonsoft.Json.JsonProperty("partition", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Partition { get; set; } - - /// Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
- /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - [Newtonsoft.Json.JsonProperty("pdName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PdName { get; set; } - - /// ReadOnly here will force the ReadOnly setting in VolumeMounts. - /// Defaults to false. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - - } - - /// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - /// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - /// into the Pod's container. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1GitRepoVolumeSource - { - /// Target directory name. - /// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - /// git repository. Otherwise, if specified, the volume will contain the git repository in - /// the subdirectory with the given name. - /// +optional - [Newtonsoft.Json.JsonProperty("directory", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Directory { get; set; } - - /// Repository URL - [Newtonsoft.Json.JsonProperty("repository", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Repository { get; set; } - - /// Commit hash for the specified revision. - /// +optional - [Newtonsoft.Json.JsonProperty("revision", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Revision { get; set; } - - - } - - /// Glusterfs volumes do not support ownership management or SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1GlusterfsVolumeSource - { - /// EndpointsName is the endpoint name that details Glusterfs topology. - /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - [Newtonsoft.Json.JsonProperty("endpoints", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Endpoints { get; set; } - - /// Path is the Glusterfs volume path. - /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - /// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - /// Defaults to false. - /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1HTTPGetAction - { - /// Host name to connect to, defaults to the pod IP. 
You probably want to set - /// "Host" in httpHeaders instead. - /// +optional - [Newtonsoft.Json.JsonProperty("host", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Host { get; set; } - - /// Custom headers to set in the request. HTTP allows repeated headers. - /// +optional - [Newtonsoft.Json.JsonProperty("httpHeaders", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection HttpHeaders { get; set; } - - /// Path to access on the HTTP server. - /// +optional - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - [Newtonsoft.Json.JsonProperty("port", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public IntstrIntOrString Port { get; set; } - - [Newtonsoft.Json.JsonProperty("scheme", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Scheme { get; set; } - - - } - - /// HTTPHeader describes a custom header to be used in HTTP probes - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1HTTPHeader - { - /// The header field name - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// The header field value - [Newtonsoft.Json.JsonProperty("value", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Value { get; set; } - - - } - - /// HTTPIngressPath associates a path with a backend. Incoming urls matching the - /// path are forwarded to the backend. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1HTTPIngressPath - { - /// Backend defines the referenced service endpoint to which the traffic - /// will be forwarded to. - [Newtonsoft.Json.JsonProperty("backend", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1IngressBackend Backend { get; set; } - - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - /// PathType determines the interpretation of the Path matching. PathType can - /// be one of the following values: - /// * Exact: Matches the URL path exactly. - /// * Prefix: Matches based on a URL path prefix split by '/'. Matching is - /// done on a path element by element basis. A path element refers is the - /// list of labels in the path split by the '/' separator. A request is a - /// match for path p if every p is an element-wise prefix of p of the - /// request path. Note that if the last element of the path is a substring - /// of the last element in request path, it is not a match (e.g. /foo/bar - /// matches /foo/bar/baz, but does not match /foo/barbaz). - /// * ImplementationSpecific: Interpretation of the Path matching is up to - /// the IngressClass. Implementations can treat this as a separate PathType - /// or treat it identically to Prefix or Exact path types. 
- /// Implementations are required to support all path types. - [Newtonsoft.Json.JsonProperty("pathType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PathType { get; set; } - - - } - - /// HTTPIngressRuleValue is a list of http selectors pointing to backends. - /// In the example: http://<host>/<path>?<searchpart> -> backend where - /// where parts of the url correspond to RFC 3986, this resource will be used - /// to match against everything after the last '/' and before the first '?' - /// or '#'. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1HTTPIngressRuleValue - { - [Newtonsoft.Json.JsonProperty("paths", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Paths { get; set; } - - - } - - /// Handler defines a specific action that should be taken - /// TODO: pass structured data to these actions, and document that data here. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Handler - { - [Newtonsoft.Json.JsonProperty("exec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ExecAction Exec { get; set; } - - [Newtonsoft.Json.JsonProperty("httpGet", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1HTTPGetAction HttpGet { get; set; } - - [Newtonsoft.Json.JsonProperty("tcpSocket", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1TCPSocketAction TcpSocket { get; set; } - - - } - - /// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - /// pod's hosts file. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1HostAlias - { - /// Hostnames for the above IP address. - [Newtonsoft.Json.JsonProperty("hostnames", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Hostnames { get; set; } - - /// IP address of the host file entry. - [Newtonsoft.Json.JsonProperty("ip", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Ip { get; set; } - - - } - - /// Host path volumes do not support ownership management or SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1HostPathVolumeSource - { - /// Path of the directory on the host. - /// If the path is a symlink, it will follow the link to the real path. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Type { get; set; } - - - } - - /// ISCSI volumes can only be mounted as read/write once. - /// ISCSI volumes support ownership management and SELinux relabeling. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ISCSIVolumeSource - { - /// whether support iSCSI Discovery CHAP authentication - /// +optional - [Newtonsoft.Json.JsonProperty("chapAuthDiscovery", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ChapAuthDiscovery { get; set; } - - /// whether support iSCSI Session CHAP authentication - /// +optional - [Newtonsoft.Json.JsonProperty("chapAuthSession", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ChapAuthSession { get; set; } - - /// Filesystem type of the volume that you want to mount. - /// Tip: Ensure that the filesystem type is supported by the host operating system. - /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - /// TODO: how do we prevent errors in the filesystem from compromising the machine - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Custom iSCSI Initiator Name. - /// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - /// <target portal>:<volume name> will be created for the connection. - /// +optional - [Newtonsoft.Json.JsonProperty("initiatorName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string InitiatorName { get; set; } - - /// Target iSCSI Qualified Name. - [Newtonsoft.Json.JsonProperty("iqn", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Iqn { get; set; } - - /// iSCSI Interface Name that uses an iSCSI transport. - /// Defaults to 'default' (tcp). - /// +optional - [Newtonsoft.Json.JsonProperty("iscsiInterface", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string IscsiInterface { get; set; } - - /// iSCSI Target Lun number. - [Newtonsoft.Json.JsonProperty("lun", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Lun { get; set; } - - /// iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - /// is other than default (typically TCP ports 860 and 3260). - /// +optional - [Newtonsoft.Json.JsonProperty("portals", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Portals { get; set; } - - /// ReadOnly here will force the ReadOnly setting in VolumeMounts. - /// Defaults to false. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - /// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - /// is other than default (typically TCP ports 860 and 3260). 
- [Newtonsoft.Json.JsonProperty("targetPortal", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TargetPortal { get; set; } - - - } - - /// Ingress is a collection of rules that allow inbound connections to reach the - /// endpoints defined by a backend. An Ingress can be configured to give services - /// externally-reachable urls, load balance traffic, terminate SSL, offer name - /// based virtual hosting etc. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Ingress - { - [Newtonsoft.Json.JsonProperty("metadata", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ObjectMeta Metadata { get; set; } - - [Newtonsoft.Json.JsonProperty("spec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1IngressSpec Spec { get; set; } - - [Newtonsoft.Json.JsonProperty("status", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1IngressStatus Status { get; set; } - - - } - - /// IngressBackend describes all endpoints for a given service and port. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressBackend - { - [Newtonsoft.Json.JsonProperty("resource", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1TypedLocalObjectReference Resource { get; set; } - - [Newtonsoft.Json.JsonProperty("service", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1IngressServiceBackend Service { get; set; } - - - } - - /// IngressRule represents the rules mapping the paths under a specified host to - /// the related backend services. Incoming requests are first evaluated for a host - /// match, then routed to the backend associated with the matching IngressRuleValue. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressRule - { - /// Host is the fully qualified domain name of a network host, as defined by RFC 3986. - /// Note the following deviations from the "host" part of the - /// URI as defined in RFC 3986: - /// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - /// the IP in the Spec of the parent Ingress. - /// 2. The `:` delimiter is not respected because ports are not allowed. - /// Currently the port of an Ingress is implicitly :80 for http and - /// :443 for https. - /// Both these may change in the future. - /// Incoming requests are matched against the host before the - /// IngressRuleValue. If the host is unspecified, the Ingress routes all - /// traffic based on the specified IngressRuleValue. - /// - /// Host can be "precise" which is a domain name without the terminating dot of - /// a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - /// prefixed with a single wildcard label (e.g. "*.foo.com"). - /// The wildcard character '*' must appear by itself as the first DNS label and - /// matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - /// Requests will be matched against the Host field in the following way: - /// 1. 
If Host is precise, the request matches this rule if the http host header is equal to Host. - /// 2. If Host is a wildcard, then the request matches this rule if the http host header - /// is to equal to the suffix (removing the first label) of the wildcard rule. - /// +optional - [Newtonsoft.Json.JsonProperty("host", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Host { get; set; } - - [Newtonsoft.Json.JsonProperty("ingressRuleValue", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1IngressRuleValue IngressRuleValue { get; set; } - - - } - - /// IngressRuleValue represents a rule to apply against incoming requests. If the - /// rule is satisfied, the request is routed to the specified backend. Currently - /// mixing different types of rules in a single Ingress is disallowed, so exactly - /// one of the following must be set. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressRuleValue - { - [Newtonsoft.Json.JsonProperty("http", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1HTTPIngressRuleValue Http { get; set; } - - - } - - /// IngressServiceBackend references a Kubernetes Service as a Backend. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressServiceBackend - { - /// Name is the referenced service. The service must exist in - /// the same namespace as the Ingress object. - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Port of the referenced service. A port name or port number - /// is required for a IngressServiceBackend. - [Newtonsoft.Json.JsonProperty("port", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ServiceBackendPort Port { get; set; } - - - } - - /// IngressSpec describes the Ingress the user wishes to exist. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressSpec - { - [Newtonsoft.Json.JsonProperty("defaultBackend", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1IngressBackend DefaultBackend { get; set; } - - [Newtonsoft.Json.JsonProperty("ingressClassName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string IngressClassName { get; set; } - - [Newtonsoft.Json.JsonProperty("rules", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Rules { get; set; } - - [Newtonsoft.Json.JsonProperty("tls", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Tls { get; set; } - - - } - - /// IngressStatus describe the current state of the Ingress. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressStatus - { - [Newtonsoft.Json.JsonProperty("loadBalancer", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LoadBalancerStatus LoadBalancer { get; set; } - - - } - - /// IngressTLS describes the transport layer security associated with an Ingress. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1IngressTLS - { - [Newtonsoft.Json.JsonProperty("hosts", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Hosts { get; set; } - - [Newtonsoft.Json.JsonProperty("secretName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SecretName { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1KeyToPath - { - /// The key to project. - [Newtonsoft.Json.JsonProperty("key", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Key { get; set; } - - /// Optional: mode bits used to set permissions on this file. - /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - /// If not specified, the volume defaultMode will be used. - /// This might be in conflict with other options that affect the file - /// mode, like fsGroup, and the result can be other mode bits set. - /// +optional - [Newtonsoft.Json.JsonProperty("mode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Mode { get; set; } - - /// The relative path of the file to map the key to. - /// May not be an absolute path. - /// May not contain the path element '..'. - /// May not start with the string '..'. - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - - } - - /// A label selector is a label query over a set of resources. The result of matchLabels and - /// matchExpressions are ANDed. An empty label selector matches all objects. A null - /// label selector matches no objects. - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1LabelSelector - { - /// matchExpressions is a list of label selector requirements. The requirements are ANDed. - /// +optional - [Newtonsoft.Json.JsonProperty("matchExpressions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection MatchExpressions { get; set; } - - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - /// map is equivalent to an element of matchExpressions, whose key field is "key", the - /// operator is "In", and the values array contains only "value". The requirements are ANDed. 
- /// +optional
- [Newtonsoft.Json.JsonProperty("matchLabels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.IDictionary MatchLabels { get; set; }
-
-
- }
-
- /// A label selector requirement is a selector that contains values, a key, and an operator that
- /// relates the key and values.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1LabelSelectorRequirement
- {
- /// key is the label key that the selector applies to.
- /// +patchMergeKey=key
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("key", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Key { get; set; }
-
- [Newtonsoft.Json.JsonProperty("operator", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Operator { get; set; }
-
- /// values is an array of string values. If the operator is In or NotIn,
- /// the values array must be non-empty. If the operator is Exists or DoesNotExist,
- /// the values array must be empty. This array is replaced during a strategic
- /// merge patch.
- /// +optional
- [Newtonsoft.Json.JsonProperty("values", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Values { get; set; }
-
-
- }
-
- /// Lifecycle describes actions that the management system should take in response to container lifecycle
- /// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
- /// until the action is complete, unless the container process fails, in which case the handler is aborted.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1Lifecycle
- {
- [Newtonsoft.Json.JsonProperty("postStart", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1Handler PostStart { get; set; }
-
- [Newtonsoft.Json.JsonProperty("preStop", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1Handler PreStop { get; set; }
-
-
- }
-
- /// LoadBalancerIngress represents the status of a load-balancer ingress point:
- /// traffic intended for the service should be sent to an ingress point.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1LoadBalancerIngress
- {
- [Newtonsoft.Json.JsonProperty("hostname", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Hostname { get; set; }
-
- [Newtonsoft.Json.JsonProperty("ip", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Ip { get; set; }
-
- [Newtonsoft.Json.JsonProperty("ports", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Ports { get; set; }
-
-
- }
-
- /// LoadBalancerStatus represents the status of a load-balancer.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1LoadBalancerStatus
- {
- [Newtonsoft.Json.JsonProperty("ingress", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Ingress { get; set; }
-
-
- }
-
- /// LocalObjectReference contains enough information to let you locate the
- /// referenced object inside the same namespace.
- /// +structType=atomic
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1LocalObjectReference
- {
- /// Name of the referent.
- /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- /// TODO: Add other useful fields. apiVersion, kind, uid?
- /// +optional
- [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Name { get; set; }
-
-
- }
-
- /// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
- /// that the fieldset applies to.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1ManagedFieldsEntry
- {
- /// APIVersion defines the version of this resource that this field set
- /// applies to. The format is "group/version" just like the top-level
- /// APIVersion field. It is necessary to track the version of a field
- /// set because it cannot be automatically converted.
- [Newtonsoft.Json.JsonProperty("apiVersion", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ApiVersion { get; set; }
-
- /// FieldsType is the discriminator for the different fields format and version.
- /// There is currently only one possible value: "FieldsV1"
- [Newtonsoft.Json.JsonProperty("fieldsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string FieldsType { get; set; }
-
- [Newtonsoft.Json.JsonProperty("fieldsV1", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1FieldsV1 FieldsV1 { get; set; }
-
- /// Manager is an identifier of the workflow managing these fields.
- [Newtonsoft.Json.JsonProperty("manager", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Manager { get; set; }
-
- [Newtonsoft.Json.JsonProperty("operation", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Operation { get; set; }
-
- /// Subresource is the name of the subresource used to update that object, or
- /// empty string if the object was updated through the main resource. The
- /// value of this field is used to distinguish between managers, even if they
- /// share the same name. For example, a status update will be distinct from a
- /// regular update using the same manager name.
- /// Note that the APIVersion field is not related to the Subresource field and
- /// it always corresponds to the version of the main resource.
- [Newtonsoft.Json.JsonProperty("subresource", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Subresource { get; set; } - - [Newtonsoft.Json.JsonProperty("time", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.DateTimeOffset? Time { get; set; } - - - } - - /// NFS volumes do not support ownership management or SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1NFSVolumeSource - { - /// Path that is exported by the NFS server. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - /// ReadOnly here will force - /// the NFS export to be mounted with read-only permissions. - /// Defaults to false. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// Server is the hostname or IP address of the NFS server. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - [Newtonsoft.Json.JsonProperty("server", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Server { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1NodeAffinity - { - /// The scheduler will prefer to schedule pods to nodes that satisfy - /// the affinity expressions specified by this field, but it may choose - /// a node that violates one or more of the expressions. The node that is - /// most preferred is the one with the greatest sum of weights, i.e. - /// for each node that meets all of the scheduling requirements (resource - /// request, requiredDuringScheduling affinity expressions, etc.), - /// compute a sum by iterating through the elements of this field and adding - /// "weight" to the sum if the node matches the corresponding matchExpressions; the - /// node(s) with the highest sum are the most preferred. - /// +optional - [Newtonsoft.Json.JsonProperty("preferredDuringSchedulingIgnoredDuringExecution", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection PreferredDuringSchedulingIgnoredDuringExecution { get; set; } - - [Newtonsoft.Json.JsonProperty("requiredDuringSchedulingIgnoredDuringExecution", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1NodeSelector RequiredDuringSchedulingIgnoredDuringExecution { get; set; } - - - } - - /// A node selector represents the union of the results of one or more label queries - /// over a set of nodes; that is, it represents the OR of the selectors represented - /// by the node selector terms. - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1NodeSelector - { - /// Required. A list of node selector terms. The terms are ORed. 
- [Newtonsoft.Json.JsonProperty("nodeSelectorTerms", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection NodeSelectorTerms { get; set; } - - - } - - /// A node selector requirement is a selector that contains values, a key, and an operator - /// that relates the key and values. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1NodeSelectorRequirement - { - /// The label key that the selector applies to. - [Newtonsoft.Json.JsonProperty("key", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Key { get; set; } - - [Newtonsoft.Json.JsonProperty("operator", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Operator { get; set; } - - /// An array of string values. If the operator is In or NotIn, - /// the values array must be non-empty. If the operator is Exists or DoesNotExist, - /// the values array must be empty. If the operator is Gt or Lt, the values - /// array must have a single element, which will be interpreted as an integer. - /// This array is replaced during a strategic merge patch. - /// +optional - [Newtonsoft.Json.JsonProperty("values", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Values { get; set; } - - - } - - /// A null or empty node selector term matches no objects. The requirements of - /// them are ANDed. - /// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1NodeSelectorTerm - { - /// A list of node selector requirements by node's labels. - /// +optional - [Newtonsoft.Json.JsonProperty("matchExpressions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection MatchExpressions { get; set; } - - /// A list of node selector requirements by node's fields. - /// +optional - [Newtonsoft.Json.JsonProperty("matchFields", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection MatchFields { get; set; } - - - } - - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ObjectFieldSelector - { - /// Version of the schema the FieldPath is written in terms of, defaults to "v1". - /// +optional - [Newtonsoft.Json.JsonProperty("apiVersion", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ApiVersion { get; set; } - - /// Path of the field to select in the specified API version. - [Newtonsoft.Json.JsonProperty("fieldPath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FieldPath { get; set; } - - - } - - /// ObjectMeta is metadata that all persisted resources must have, which includes all objects - /// users must create. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1ObjectMeta
- {
- /// Annotations is an unstructured key value map stored with a resource that may be
- /// set by external tools to store and retrieve arbitrary metadata. They are not
- /// queryable and should be preserved when modifying objects.
- /// More info: http://kubernetes.io/docs/user-guide/annotations
- /// +optional
- [Newtonsoft.Json.JsonProperty("annotations", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.IDictionary Annotations { get; set; }
-
- /// The name of the cluster which the object belongs to.
- /// This is used to distinguish resources with same name and namespace in different clusters.
- /// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
- /// +optional
- [Newtonsoft.Json.JsonProperty("clusterName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ClusterName { get; set; }
-
- [Newtonsoft.Json.JsonProperty("creationTimestamp", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.DateTimeOffset? CreationTimestamp { get; set; }
-
- /// Number of seconds allowed for this object to gracefully terminate before
- /// it will be removed from the system. Only set when deletionTimestamp is also set.
- /// May only be shortened.
- /// Read-only.
- /// +optional
- [Newtonsoft.Json.JsonProperty("deletionGracePeriodSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public long? DeletionGracePeriodSeconds { get; set; }
-
- [Newtonsoft.Json.JsonProperty("deletionTimestamp", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.DateTimeOffset? DeletionTimestamp { get; set; }
-
- /// Must be empty before the object is deleted from the registry. Each entry
- /// is an identifier for the responsible component that will remove the entry
- /// from the list. If the deletionTimestamp of the object is non-nil, entries
- /// in this list can only be removed.
- /// Finalizers may be processed and removed in any order. Order is NOT enforced
- /// because it introduces significant risk of stuck finalizers.
- /// finalizers is a shared field, any actor with permission can reorder it.
- /// If the finalizer list is processed in order, then this can lead to a situation
- /// in which the component responsible for the first finalizer in the list is
- /// waiting for a signal (field value, external system, or other) produced by a
- /// component responsible for a finalizer later in the list, resulting in a deadlock.
- /// Without enforced ordering finalizers are free to order amongst themselves and
- /// are not vulnerable to ordering changes in the list.
- /// +optional
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("finalizers", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Finalizers { get; set; }
-
- /// GenerateName is an optional prefix, used by the server, to generate a unique
- /// name ONLY IF the Name field has not been provided.
- /// If this field is used, the name returned to the client will be different
- /// than the name passed. This value will also be combined with a unique suffix.
- /// The provided value has the same validation rules as the Name field,
- /// and may be truncated by the length of the suffix required to make the value
- /// unique on the server.
- ///
- /// If this field is specified and the generated name exists, the server will
- /// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
- /// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
- /// should retry (optionally after the time indicated in the Retry-After header).
- ///
- /// Applied only if Name is not specified.
- /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
- /// +optional
- [Newtonsoft.Json.JsonProperty("generateName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string GenerateName { get; set; }
-
- /// A sequence number representing a specific generation of the desired state.
- /// Populated by the system. Read-only.
- /// +optional
- [Newtonsoft.Json.JsonProperty("generation", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public long? Generation { get; set; }
-
- /// Map of string keys and values that can be used to organize and categorize
- /// (scope and select) objects. May match selectors of replication controllers
- /// and services.
- /// More info: http://kubernetes.io/docs/user-guide/labels
- /// +optional
- [Newtonsoft.Json.JsonProperty("labels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.IDictionary Labels { get; set; }
-
- /// ManagedFields maps workflow-id and version to the set of fields
- /// that are managed by that workflow. This is mostly for internal
- /// housekeeping, and users typically shouldn't need to set or
- /// understand this field. A workflow can be the user's name, a
- /// controller's name, or the name of a specific apply path like
- /// "ci-cd". The set of fields is always in the version that the
- /// workflow used when modifying the object.
- ///
- /// +optional
- [Newtonsoft.Json.JsonProperty("managedFields", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection ManagedFields { get; set; }
-
- /// Name must be unique within a namespace. Is required when creating resources, although
- /// some resources may allow a client to request the generation of an appropriate name
- /// automatically. Name is primarily intended for creation idempotence and configuration
- /// definition.
- /// Cannot be updated.
- /// More info: http://kubernetes.io/docs/user-guide/identifiers#names
- /// +optional
- [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Name { get; set; }
-
- /// Namespace defines the space within which each name must be unique. An empty namespace is
- /// equivalent to the "default" namespace, but "default" is the canonical representation.
- /// Not all objects are required to be scoped to a namespace - the value of this field for
- /// those objects will be empty.
- ///
- /// Must be a DNS_LABEL.
- /// Cannot be updated.
- /// More info: http://kubernetes.io/docs/user-guide/namespaces
- /// +optional
- [Newtonsoft.Json.JsonProperty("namespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Namespace { get; set; }
-
- /// List of objects depended by this object. If ALL objects in the list have
- /// been deleted, this object will be garbage collected. If this object is managed by a controller,
- /// then an entry in this list will point to this controller, with the controller field set to true.
- /// There cannot be more than one managing controller.
- /// +optional
- /// +patchMergeKey=uid
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("ownerReferences", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection OwnerReferences { get; set; }
-
- /// An opaque value that represents the internal version of this object that can
- /// be used by clients to determine when objects have changed. May be used for optimistic
- /// concurrency, change detection, and the watch operation on a resource or set of resources.
- /// Clients must treat these values as opaque and passed unmodified back to the server.
- /// They may only be valid for a particular resource or set of resources.
- ///
- /// Populated by the system.
- /// Read-only.
- /// Value must be treated as opaque by clients and .
- /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- /// +optional
- [Newtonsoft.Json.JsonProperty("resourceVersion", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ResourceVersion { get; set; }
-
- /// SelfLink is a URL representing this object.
- /// Populated by the system.
- /// Read-only.
- ///
- /// DEPRECATED
- /// Kubernetes will stop propagating this field in 1.20 release and the field is planned
- /// to be removed in 1.21 release.
- /// +optional
- [Newtonsoft.Json.JsonProperty("selfLink", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string SelfLink { get; set; }
-
- [Newtonsoft.Json.JsonProperty("uid", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Uid { get; set; }
-
-
- }
-
- /// OwnerReference contains enough information to let you identify an owning
- /// object. An owning object must be in the same namespace as the dependent, or
- /// be cluster-scoped, so there is no namespace field.
- /// +structType=atomic
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1OwnerReference
- {
- /// API version of the referent.
- [Newtonsoft.Json.JsonProperty("apiVersion", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ApiVersion { get; set; }
-
- /// If true, AND if the owner has the "foregroundDeletion" finalizer, then
- /// the owner cannot be deleted from the key-value store until this
- /// reference is removed.
- /// Defaults to false.
- /// To set this field, a user needs "delete" permission of the owner,
- /// otherwise 422 (Unprocessable Entity) will be returned.
- /// +optional
- [Newtonsoft.Json.JsonProperty("blockOwnerDeletion", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? BlockOwnerDeletion { get; set; }
-
- /// If true, this reference points to the managing controller.
- /// +optional
- [Newtonsoft.Json.JsonProperty("controller", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? Controller { get; set; }
-
- /// Kind of the referent.
- /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
- [Newtonsoft.Json.JsonProperty("kind", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Kind { get; set; }
-
- /// Name of the referent.
- /// More info: http://kubernetes.io/docs/user-guide/identifiers#names
- [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Name { get; set; }
-
- [Newtonsoft.Json.JsonProperty("uid", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Uid { get; set; }
-
-
- }
-
- /// PersistentVolumeClaimSpec describes the common attributes of storage devices
- /// and allows a Source for provider-specific attributes
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PersistentVolumeClaimSpec
- {
- /// AccessModes contains the desired access modes the volume should have.
- /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
- /// +optional
- [Newtonsoft.Json.JsonProperty("accessModes", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection AccessModes { get; set; }
-
- [Newtonsoft.Json.JsonProperty("dataSource", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1TypedLocalObjectReference DataSource { get; set; }
-
- [Newtonsoft.Json.JsonProperty("dataSourceRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1TypedLocalObjectReference DataSourceRef { get; set; }
-
- [Newtonsoft.Json.JsonProperty("resources", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1ResourceRequirements Resources { get; set; }
-
- [Newtonsoft.Json.JsonProperty("selector", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1LabelSelector Selector { get; set; }
-
- /// Name of the StorageClass required by the claim.
- /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
- /// +optional
- [Newtonsoft.Json.JsonProperty("storageClassName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string StorageClassName { get; set; }
-
- [Newtonsoft.Json.JsonProperty("volumeMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string VolumeMode { get; set; }
-
- /// VolumeName is the binding reference to the PersistentVolume backing this claim.
- /// +optional
- [Newtonsoft.Json.JsonProperty("volumeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string VolumeName { get; set; }
-
-
- }
-
- /// PersistentVolumeClaimTemplate is used to produce
- /// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PersistentVolumeClaimTemplate
- {
- /// Annotations is an unstructured key value map stored with a resource that may be
- /// set by external tools to store and retrieve arbitrary metadata. They are not
- /// queryable and should be preserved when modifying objects.
- /// More info: http://kubernetes.io/docs/user-guide/annotations
- /// +optional
- [Newtonsoft.Json.JsonProperty("annotations", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.IDictionary Annotations { get; set; }
-
- /// The name of the cluster which the object belongs to.
- /// This is used to distinguish resources with same name and namespace in different clusters.
- /// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
- /// +optional
- [Newtonsoft.Json.JsonProperty("clusterName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ClusterName { get; set; }
-
- [Newtonsoft.Json.JsonProperty("creationTimestamp", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.DateTimeOffset? CreationTimestamp { get; set; }
-
- /// Number of seconds allowed for this object to gracefully terminate before
- /// it will be removed from the system. Only set when deletionTimestamp is also set.
- /// May only be shortened.
- /// Read-only.
- /// +optional
- [Newtonsoft.Json.JsonProperty("deletionGracePeriodSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public long? DeletionGracePeriodSeconds { get; set; }
-
- [Newtonsoft.Json.JsonProperty("deletionTimestamp", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.DateTimeOffset? DeletionTimestamp { get; set; }
-
- /// Must be empty before the object is deleted from the registry. Each entry
- /// is an identifier for the responsible component that will remove the entry
- /// from the list. If the deletionTimestamp of the object is non-nil, entries
- /// in this list can only be removed.
- /// Finalizers may be processed and removed in any order. Order is NOT enforced
- /// because it introduces significant risk of stuck finalizers.
- /// finalizers is a shared field, any actor with permission can reorder it.
- /// If the finalizer list is processed in order, then this can lead to a situation
- /// in which the component responsible for the first finalizer in the list is
- /// waiting for a signal (field value, external system, or other) produced by a
- /// component responsible for a finalizer later in the list, resulting in a deadlock.
- /// Without enforced ordering finalizers are free to order amongst themselves and
- /// are not vulnerable to ordering changes in the list.
- /// +optional
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("finalizers", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Finalizers { get; set; }
-
- /// GenerateName is an optional prefix, used by the server, to generate a unique
- /// name ONLY IF the Name field has not been provided.
- /// If this field is used, the name returned to the client will be different
- /// than the name passed. This value will also be combined with a unique suffix.
- /// The provided value has the same validation rules as the Name field,
- /// and may be truncated by the length of the suffix required to make the value
- /// unique on the server.
- ///
- /// If this field is specified and the generated name exists, the server will
- /// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
- /// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
- /// should retry (optionally after the time indicated in the Retry-After header).
- ///
- /// Applied only if Name is not specified.
- /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
- /// +optional
- [Newtonsoft.Json.JsonProperty("generateName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string GenerateName { get; set; }
-
- /// A sequence number representing a specific generation of the desired state.
- /// Populated by the system. Read-only.
- /// +optional
- [Newtonsoft.Json.JsonProperty("generation", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public long? Generation { get; set; }
-
- /// Map of string keys and values that can be used to organize and categorize
- /// (scope and select) objects. May match selectors of replication controllers
- /// and services.
- /// More info: http://kubernetes.io/docs/user-guide/labels
- /// +optional
- [Newtonsoft.Json.JsonProperty("labels", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.IDictionary Labels { get; set; }
-
- /// ManagedFields maps workflow-id and version to the set of fields
- /// that are managed by that workflow. This is mostly for internal
- /// housekeeping, and users typically shouldn't need to set or
- /// understand this field. A workflow can be the user's name, a
- /// controller's name, or the name of a specific apply path like
- /// "ci-cd". The set of fields is always in the version that the
- /// workflow used when modifying the object.
- ///
- /// +optional
- [Newtonsoft.Json.JsonProperty("managedFields", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection ManagedFields { get; set; }
-
- /// Name must be unique within a namespace. Is required when creating resources, although
- /// some resources may allow a client to request the generation of an appropriate name
- /// automatically. Name is primarily intended for creation idempotence and configuration
- /// definition.
- /// Cannot be updated.
- /// More info: http://kubernetes.io/docs/user-guide/identifiers#names
- /// +optional
- [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Name { get; set; }
-
- /// Namespace defines the space within which each name must be unique. An empty namespace is
- /// equivalent to the "default" namespace, but "default" is the canonical representation.
- /// Not all objects are required to be scoped to a namespace - the value of this field for
- /// those objects will be empty.
- ///
- /// Must be a DNS_LABEL.
- /// Cannot be updated.
- /// More info: http://kubernetes.io/docs/user-guide/namespaces
- /// +optional
- [Newtonsoft.Json.JsonProperty("namespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Namespace { get; set; }
-
- /// List of objects depended by this object. If ALL objects in the list have
- /// been deleted, this object will be garbage collected. If this object is managed by a controller,
- /// then an entry in this list will point to this controller, with the controller field set to true.
- /// There cannot be more than one managing controller.
- /// +optional
- /// +patchMergeKey=uid
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("ownerReferences", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection OwnerReferences { get; set; }
-
- /// An opaque value that represents the internal version of this object that can
- /// be used by clients to determine when objects have changed. May be used for optimistic
- /// concurrency, change detection, and the watch operation on a resource or set of resources.
- /// Clients must treat these values as opaque and passed unmodified back to the server.
- /// They may only be valid for a particular resource or set of resources.
- ///
- /// Populated by the system.
- /// Read-only.
- /// Value must be treated as opaque by clients and .
- /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
- /// +optional
- [Newtonsoft.Json.JsonProperty("resourceVersion", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ResourceVersion { get; set; }
-
- /// SelfLink is a URL representing this object.
- /// Populated by the system.
- /// Read-only.
- ///
- /// DEPRECATED
- /// Kubernetes will stop propagating this field in 1.20 release and the field is planned
- /// to be removed in 1.21 release.
- /// +optional
- [Newtonsoft.Json.JsonProperty("selfLink", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string SelfLink { get; set; }
-
- [Newtonsoft.Json.JsonProperty("spec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1PersistentVolumeClaimSpec Spec { get; set; }
-
- [Newtonsoft.Json.JsonProperty("uid", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string Uid { get; set; }
-
-
- }
-
- /// This volume finds the bound PV and mounts that volume for the pod. A
- /// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
- /// type of volume that is owned by someone else (the system).
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PersistentVolumeClaimVolumeSource
- {
- /// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
- /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
- [Newtonsoft.Json.JsonProperty("claimName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string ClaimName { get; set; }
-
- /// Will force the ReadOnly setting in VolumeMounts.
- /// Default false.
- /// +optional
- [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? ReadOnly { get; set; }
-
-
- }
-
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PhotonPersistentDiskVolumeSource
- {
- /// Filesystem type to mount.
- /// Must be a filesystem type supported by the host operating system.
- /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string FsType { get; set; }
-
- /// ID that identifies Photon Controller persistent disk
- [Newtonsoft.Json.JsonProperty("pdID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string PdID { get; set; }
-
-
- }
-
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PodAffinity
- {
- /// The scheduler will prefer to schedule pods to nodes that satisfy
- /// the affinity expressions specified by this field, but it may choose
- /// a node that violates one or more of the expressions. The node that is
- /// most preferred is the one with the greatest sum of weights, i.e.
- /// for each node that meets all of the scheduling requirements (resource
- /// request, requiredDuringScheduling affinity expressions, etc.),
- /// compute a sum by iterating through the elements of this field and adding
- /// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
- /// node(s) with the highest sum are the most preferred.
- /// +optional
- [Newtonsoft.Json.JsonProperty("preferredDuringSchedulingIgnoredDuringExecution", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection PreferredDuringSchedulingIgnoredDuringExecution { get; set; }
-
- /// If the affinity requirements specified by this field are not met at
- /// scheduling time, the pod will not be scheduled onto the node.
- /// If the affinity requirements specified by this field cease to be met
- /// at some point during pod execution (e.g. due to a pod label update), the
- /// system may or may not try to eventually evict the pod from its node.
- /// When there are multiple elements, the lists of nodes corresponding to each
- /// podAffinityTerm are intersected, i.e. all terms must be satisfied.
- /// +optional
- [Newtonsoft.Json.JsonProperty("requiredDuringSchedulingIgnoredDuringExecution", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection RequiredDuringSchedulingIgnoredDuringExecution { get; set; }
-
-
- }
-
- /// Defines a set of pods (namely those matching the labelSelector
- /// relative to the given namespace(s)) that this pod should be
- /// co-located (affinity) or not co-located (anti-affinity) with,
- /// where co-located is defined as running on a node whose value of
- /// the label with key <topologyKey> matches that of any node on which
- /// a pod of the set of pods is running
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PodAffinityTerm
- {
- [Newtonsoft.Json.JsonProperty("labelSelector", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1LabelSelector LabelSelector { get; set; }
-
- [Newtonsoft.Json.JsonProperty("namespaceSelector", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1LabelSelector NamespaceSelector { get; set; }
-
- /// namespaces specifies a static list of namespace names that the term applies to.
- /// The term is applied to the union of the namespaces listed in this field
- /// and the ones selected by namespaceSelector.
- /// null or empty namespaces list and null namespaceSelector means "this pod's namespace"
- /// +optional
- [Newtonsoft.Json.JsonProperty("namespaces", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Namespaces { get; set; }
-
- /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
- /// the labelSelector in the specified namespaces, where co-located is defined as running on a node
- /// whose value of the label with key topologyKey matches that of any node on which any of the
- /// selected pods is running.
- /// Empty topologyKey is not allowed.
- [Newtonsoft.Json.JsonProperty("topologyKey", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string TopologyKey { get; set; }
-
-
- }
-
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PodAntiAffinity
- {
- /// The scheduler will prefer to schedule pods to nodes that satisfy
- /// the anti-affinity expressions specified by this field, but it may choose
- /// a node that violates one or more of the expressions. The node that is
- /// most preferred is the one with the greatest sum of weights, i.e.
- /// for each node that meets all of the scheduling requirements (resource
- /// request, requiredDuringScheduling anti-affinity expressions, etc.),
- /// compute a sum by iterating through the elements of this field and adding
- /// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
- /// node(s) with the highest sum are the most preferred.
- /// +optional
- [Newtonsoft.Json.JsonProperty("preferredDuringSchedulingIgnoredDuringExecution", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection PreferredDuringSchedulingIgnoredDuringExecution { get; set; }
-
- /// If the anti-affinity requirements specified by this field are not met at
- /// scheduling time, the pod will not be scheduled onto the node.
- /// If the anti-affinity requirements specified by this field cease to be met
- /// at some point during pod execution (e.g. due to a pod label update), the
- /// system may or may not try to eventually evict the pod from its node.
- /// When there are multiple elements, the lists of nodes corresponding to each
- /// podAffinityTerm are intersected, i.e. all terms must be satisfied.
- /// +optional
- [Newtonsoft.Json.JsonProperty("requiredDuringSchedulingIgnoredDuringExecution", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection RequiredDuringSchedulingIgnoredDuringExecution { get; set; }
-
-
- }
-
- /// PodDNSConfig defines the DNS parameters of a pod in addition to
- /// those generated from DNSPolicy.
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PodDNSConfig
- {
- /// A list of DNS name server IP addresses.
- /// This will be appended to the base nameservers generated from DNSPolicy.
- /// Duplicated nameservers will be removed.
- /// +optional
- [Newtonsoft.Json.JsonProperty("nameservers", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Nameservers { get; set; }
-
- /// A list of DNS resolver options.
- /// This will be merged with the base options generated from DNSPolicy.
- /// Duplicated entries will be removed. Resolution options given in Options
- /// will override those that appear in the base DNSPolicy.
- /// +optional
- [Newtonsoft.Json.JsonProperty("options", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Options { get; set; }
-
- /// A list of DNS search domains for host-name lookup.
- /// This will be appended to the base search paths generated from DNSPolicy.
- /// Duplicated search paths will be removed.
- /// +optional
- [Newtonsoft.Json.JsonProperty("searches", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Searches { get; set; }
-
-
- }
-
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PodDNSConfigOption
- {
- /// Required.
- [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// +optional - [Newtonsoft.Json.JsonProperty("value", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Value { get; set; } - - - } - - /// PodReadinessGate contains the reference to a pod condition - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1PodReadinessGate - { - [Newtonsoft.Json.JsonProperty("conditionType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ConditionType { get; set; } - - - } - - /// Some fields are also present in container.securityContext. Field values of - /// container.securityContext take precedence over field values of PodSecurityContext. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1PodSecurityContext - { - /// A special supplemental group that applies to all containers in a pod. - /// Some volume types allow the Kubelet to change the ownership of that volume - /// to be owned by the pod: - /// - /// 1. The owning GID will be the FSGroup - /// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - /// 3. The permission bits are OR'd with rw-rw---- - /// - /// If unset, the Kubelet will not modify the ownership and permissions of any volume. - /// +optional - [Newtonsoft.Json.JsonProperty("fsGroup", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? FsGroup { get; set; } - - [Newtonsoft.Json.JsonProperty("fsGroupChangePolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsGroupChangePolicy { get; set; } - - /// The GID to run the entrypoint of the container process. - /// Uses runtime default if unset. - /// May also be set in SecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence - /// for that container. - /// +optional - [Newtonsoft.Json.JsonProperty("runAsGroup", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? RunAsGroup { get; set; } - - /// Indicates that the container must run as a non-root user. - /// If true, the Kubelet will validate the image at runtime to ensure that it - /// does not run as UID 0 (root) and fail to start the container if it does. - /// If unset or false, no such validation will be performed. - /// May also be set in SecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence. - /// +optional - [Newtonsoft.Json.JsonProperty("runAsNonRoot", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? RunAsNonRoot { get; set; } - - /// The UID to run the entrypoint of the container process. - /// Defaults to user specified in image metadata if unspecified. - /// May also be set in SecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence - /// for that container. 
- /// +optional
- [Newtonsoft.Json.JsonProperty("runAsUser", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public long? RunAsUser { get; set; }
-
- [Newtonsoft.Json.JsonProperty("seLinuxOptions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1SELinuxOptions SeLinuxOptions { get; set; }
-
- [Newtonsoft.Json.JsonProperty("seccompProfile", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1SeccompProfile SeccompProfile { get; set; }
-
- /// A list of groups applied to the first process run in each container, in addition
- /// to the container's primary GID. If unspecified, no groups will be added to
- /// any container.
- /// +optional
- [Newtonsoft.Json.JsonProperty("supplementalGroups", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection SupplementalGroups { get; set; }
-
- /// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
- /// sysctls (by the container runtime) might fail to launch.
- /// +optional
- [Newtonsoft.Json.JsonProperty("sysctls", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Sysctls { get; set; }
-
- [Newtonsoft.Json.JsonProperty("windowsOptions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1WindowsSecurityContextOptions WindowsOptions { get; set; }
-
-
- }
-
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")]
- public partial class V1PodSpec
- {
- /// Optional duration in seconds the pod may be active on the node relative to
- /// StartTime before the system will actively try to mark it failed and kill associated containers.
- /// Value must be a positive integer.
- /// +optional
- [Newtonsoft.Json.JsonProperty("activeDeadlineSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public long? ActiveDeadlineSeconds { get; set; }
-
- [Newtonsoft.Json.JsonProperty("affinity", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1Affinity Affinity { get; set; }
-
- /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
- /// +optional
- [Newtonsoft.Json.JsonProperty("automountServiceAccountToken", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? AutomountServiceAccountToken { get; set; }
-
- /// List of containers belonging to the pod.
- /// Containers cannot currently be added or removed.
- /// There must be at least one container in a Pod.
- /// Cannot be updated.
- /// +patchMergeKey=name
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("containers", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection Containers { get; set; }
-
- [Newtonsoft.Json.JsonProperty("dnsConfig", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public V1PodDNSConfig DnsConfig { get; set; }
-
- [Newtonsoft.Json.JsonProperty("dnsPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public string DnsPolicy { get; set; }
-
- /// EnableServiceLinks indicates whether information about services should be injected into pod's
- /// environment variables, matching the syntax of Docker links.
- /// Optional: Defaults to true.
- /// +optional
- [Newtonsoft.Json.JsonProperty("enableServiceLinks", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? EnableServiceLinks { get; set; }
-
- /// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
- /// pod to perform user-initiated actions such as debugging. This list cannot be specified when
- /// creating a pod, and it cannot be modified by updating the pod spec. In order to add an
- /// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
- /// This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
- /// +optional
- /// +patchMergeKey=name
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("ephemeralContainers", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection EphemeralContainers { get; set; }
-
- /// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
- /// file if specified. This is only valid for non-hostNetwork pods.
- /// +optional
- /// +patchMergeKey=ip
- /// +patchStrategy=merge
- [Newtonsoft.Json.JsonProperty("hostAliases", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public System.Collections.Generic.ICollection HostAliases { get; set; }
-
- /// Use the host's ipc namespace.
- /// Optional: Default to false.
- /// +k8s:conversion-gen=false
- /// +optional
- [Newtonsoft.Json.JsonProperty("hostIPC", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? HostIPC { get; set; }
-
- /// Host networking requested for this pod. Use the host's network namespace.
- /// If this option is set, the ports that will be used must be specified.
- /// Default to false.
- /// +k8s:conversion-gen=false
- /// +optional
- [Newtonsoft.Json.JsonProperty("hostNetwork", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? HostNetwork { get; set; }
-
- /// Use the host's pid namespace.
- /// Optional: Default to false.
- /// +k8s:conversion-gen=false
- /// +optional
- [Newtonsoft.Json.JsonProperty("hostPID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)]
- public bool? HostPID { get; set; }
-
- /// Specifies the hostname of the Pod
- /// If not specified, the pod's hostname will be set to a system-defined value.
- /// +optional - [Newtonsoft.Json.JsonProperty("hostname", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Hostname { get; set; } - - /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - /// If specified, these secrets will be passed to individual puller implementations for them to use. For example, - /// in the case of docker, only DockerConfig type secrets are honored. - /// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - /// +optional - /// +patchMergeKey=name - /// +patchStrategy=merge - [Newtonsoft.Json.JsonProperty("imagePullSecrets", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection ImagePullSecrets { get; set; } - - /// List of initialization containers belonging to the pod. - /// Init containers are executed in order prior to containers being started. If any - /// init container fails, the pod is considered to have failed and is handled according - /// to its restartPolicy. The name for an init container or normal container must be - /// unique among all containers. - /// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - /// The resourceRequirements of an init container are taken into account during scheduling - /// by finding the highest request/limit for each resource type, and then using the max of - /// of that value or the sum of the normal containers. Limits are applied to init containers - /// in a similar fashion. - /// Init containers cannot currently be added or removed. - /// Cannot be updated. - /// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - /// +patchMergeKey=name - /// +patchStrategy=merge - [Newtonsoft.Json.JsonProperty("initContainers", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection InitContainers { get; set; } - - /// NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - /// the scheduler simply schedules this pod onto that node, assuming that it fits resource - /// requirements. - /// +optional - [Newtonsoft.Json.JsonProperty("nodeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string NodeName { get; set; } - - /// NodeSelector is a selector which must be true for the pod to fit on a node. - /// Selector which must match a node's labels for the pod to be scheduled on that node. 
- /// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - /// +optional - /// +mapType=atomic - [Newtonsoft.Json.JsonProperty("nodeSelector", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary NodeSelector { get; set; } - - [Newtonsoft.Json.JsonProperty("overhead", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceList Overhead { get; set; } - - [Newtonsoft.Json.JsonProperty("preemptionPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PreemptionPolicy { get; set; } - - /// The priority value. Various system components use this field to find the - /// priority of the pod. When Priority Admission Controller is enabled, it - /// prevents users from setting this field. The admission controller populates - /// this field from PriorityClassName. - /// The higher the value, the higher the priority. - /// +optional - [Newtonsoft.Json.JsonProperty("priority", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Priority { get; set; } - - /// If specified, indicates the pod's priority. "system-node-critical" and - /// "system-cluster-critical" are two special keywords which indicate the - /// highest priorities with the former being the highest priority. Any other - /// name must be defined by creating a PriorityClass object with that name. - /// If not specified, the pod priority will be default or zero if there is no - /// default. - /// +optional - [Newtonsoft.Json.JsonProperty("priorityClassName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string PriorityClassName { get; set; } - - /// If specified, all readiness gates will be evaluated for pod readiness. - /// A pod is ready when all its containers are ready AND - /// all conditions specified in the readiness gates have status equal to "True" - /// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - /// +optional - [Newtonsoft.Json.JsonProperty("readinessGates", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection ReadinessGates { get; set; } - - [Newtonsoft.Json.JsonProperty("restartPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string RestartPolicy { get; set; } - - /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - /// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - /// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - /// empty definition that uses the default runtime handler. - /// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - /// This is a beta feature as of Kubernetes v1.14. - /// +optional - [Newtonsoft.Json.JsonProperty("runtimeClassName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string RuntimeClassName { get; set; } - - /// If specified, the pod will be dispatched by specified scheduler. 
- /// If not specified, the pod will be dispatched by default scheduler. - /// +optional - [Newtonsoft.Json.JsonProperty("schedulerName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SchedulerName { get; set; } - - [Newtonsoft.Json.JsonProperty("securityContext", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PodSecurityContext SecurityContext { get; set; } - - /// DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - /// Deprecated: Use serviceAccountName instead. - /// +k8s:conversion-gen=false - /// +optional - [Newtonsoft.Json.JsonProperty("serviceAccount", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ServiceAccount { get; set; } - - /// ServiceAccountName is the name of the ServiceAccount to use to run this pod. - /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - /// +optional - [Newtonsoft.Json.JsonProperty("serviceAccountName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ServiceAccountName { get; set; } - - /// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - /// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - /// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - /// If a pod does not have FQDN, this has no effect. - /// Default to false. - /// +optional - [Newtonsoft.Json.JsonProperty("setHostnameAsFQDN", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? SetHostnameAsFQDN { get; set; } - - /// Share a single process namespace between all of the containers in a pod. - /// When this is set containers will be able to view and signal processes from other containers - /// in the same pod, and the first process in each container will not be assigned PID 1. - /// HostPID and ShareProcessNamespace cannot both be set. - /// Optional: Default to false. - /// +k8s:conversion-gen=false - /// +optional - [Newtonsoft.Json.JsonProperty("shareProcessNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ShareProcessNamespace { get; set; } - - /// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - /// If not specified, the pod will not have a domainname at all. - /// +optional - [Newtonsoft.Json.JsonProperty("subdomain", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Subdomain { get; set; } - - /// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - /// Value must be non-negative integer. The value zero indicates stop immediately via - /// the kill signal (no opportunity to shut down). - /// If this value is nil, the default grace period will be used instead. 
- /// The grace period is the duration in seconds after the processes running in the pod are sent - /// a termination signal and the time when the processes are forcibly halted with a kill signal. - /// Set this value longer than the expected cleanup time for your process. - /// Defaults to 30 seconds. - /// +optional - [Newtonsoft.Json.JsonProperty("terminationGracePeriodSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? TerminationGracePeriodSeconds { get; set; } - - /// If specified, the pod's tolerations. - /// +optional - [Newtonsoft.Json.JsonProperty("tolerations", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Tolerations { get; set; } - - /// TopologySpreadConstraints describes how a group of pods ought to spread across topology - /// domains. Scheduler will schedule pods in a way which abides by the constraints. - /// All topologySpreadConstraints are ANDed. - /// +optional - /// +patchMergeKey=topologyKey - /// +patchStrategy=merge - /// +listType=map - /// +listMapKey=topologyKey - /// +listMapKey=whenUnsatisfiable - [Newtonsoft.Json.JsonProperty("topologySpreadConstraints", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection TopologySpreadConstraints { get; set; } - - /// List of volumes that can be mounted by containers belonging to the pod. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes - /// +optional - /// +patchMergeKey=name - /// +patchStrategy=merge,retainKeys - [Newtonsoft.Json.JsonProperty("volumes", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Volumes { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1PortStatus - { - [Newtonsoft.Json.JsonProperty("error", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Error { get; set; } - - [Newtonsoft.Json.JsonProperty("port", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Port { get; set; } - - [Newtonsoft.Json.JsonProperty("protocol", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Protocol { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1PortworxVolumeSource - { - /// FSType represents the filesystem type to mount - /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? 
ReadOnly { get; set; } - - /// VolumeID uniquely identifies a Portworx volume - [Newtonsoft.Json.JsonProperty("volumeID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumeID { get; set; } - - - } - - /// An empty preferred scheduling term matches all objects with implicit weight 0 - /// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1PreferredSchedulingTerm - { - [Newtonsoft.Json.JsonProperty("preference", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1NodeSelectorTerm Preference { get; set; } - - /// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - [Newtonsoft.Json.JsonProperty("weight", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Weight { get; set; } - - - } - - /// Probe describes a health check to be performed against a container to determine whether it is - /// alive or ready to receive traffic. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Probe - { - [Newtonsoft.Json.JsonProperty("exec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ExecAction Exec { get; set; } - - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. - /// Defaults to 3. Minimum value is 1. - /// +optional - [Newtonsoft.Json.JsonProperty("failureThreshold", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? FailureThreshold { get; set; } - - [Newtonsoft.Json.JsonProperty("httpGet", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1HTTPGetAction HttpGet { get; set; } - - /// Number of seconds after the container has started before liveness probes are initiated. - /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - /// +optional - [Newtonsoft.Json.JsonProperty("initialDelaySeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? InitialDelaySeconds { get; set; } - - /// How often (in seconds) to perform the probe. - /// Default to 10 seconds. Minimum value is 1. - /// +optional - [Newtonsoft.Json.JsonProperty("periodSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? PeriodSeconds { get; set; } - - /// Minimum consecutive successes for the probe to be considered successful after having failed. - /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - /// +optional - [Newtonsoft.Json.JsonProperty("successThreshold", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? 
SuccessThreshold { get; set; } - - [Newtonsoft.Json.JsonProperty("tcpSocket", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1TCPSocketAction TcpSocket { get; set; } - - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - /// The grace period is the duration in seconds after the processes running in the pod are sent - /// a termination signal and the time when the processes are forcibly halted with a kill signal. - /// Set this value longer than the expected cleanup time for your process. - /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - /// value overrides the value provided by the pod spec. - /// Value must be non-negative integer. The value zero indicates stop immediately via - /// the kill signal (no opportunity to shut down). - /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - /// +optional - [Newtonsoft.Json.JsonProperty("terminationGracePeriodSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? TerminationGracePeriodSeconds { get; set; } - - /// Number of seconds after which the probe times out. - /// Defaults to 1 second. Minimum value is 1. - /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - /// +optional - [Newtonsoft.Json.JsonProperty("timeoutSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? TimeoutSeconds { get; set; } - - - } - - /// Represents a projected volume source - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ProjectedVolumeSource - { - /// Mode bits used to set permissions on created files by default. - /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - /// Directories within the path are not affected by this setting. - /// This might be in conflict with other options that affect the file - /// mode, like fsGroup, and the result can be other mode bits set. - /// +optional - [Newtonsoft.Json.JsonProperty("defaultMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? DefaultMode { get; set; } - - /// list of volume projections - /// +optional - [Newtonsoft.Json.JsonProperty("sources", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Sources { get; set; } - - - } - - /// Quobyte volumes do not support ownership management or SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1QuobyteVolumeSource - { - /// Group to map volume access to - /// Default is no group - /// +optional - [Newtonsoft.Json.JsonProperty("group", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Group { get; set; } - - /// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. - /// Defaults to false. 
- /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// Registry represents a single or multiple Quobyte Registry services - /// specified as a string as host:port pair (multiple entries are separated with commas) - /// which acts as the central registry for volumes - [Newtonsoft.Json.JsonProperty("registry", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Registry { get; set; } - - /// Tenant owning the given Quobyte volume in the Backend - /// Used with dynamically provisioned Quobyte volumes, value is set by the plugin - /// +optional - [Newtonsoft.Json.JsonProperty("tenant", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Tenant { get; set; } - - /// User to map volume access to - /// Defaults to serivceaccount user - /// +optional - [Newtonsoft.Json.JsonProperty("user", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string User { get; set; } - - /// Volume is a string that references an already created Quobyte volume by name. - [Newtonsoft.Json.JsonProperty("volume", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Volume { get; set; } - - - } - - /// RBD volumes support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1RBDVolumeSource - { - /// Filesystem type of the volume that you want to mount. - /// Tip: Ensure that the filesystem type is supported by the host operating system. - /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - /// TODO: how do we prevent errors in the filesystem from compromising the machine - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// The rados image name. - /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - [Newtonsoft.Json.JsonProperty("image", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Image { get; set; } - - /// Keyring is the path to key ring for RBDUser. - /// Default is /etc/ceph/keyring. - /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("keyring", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Keyring { get; set; } - - /// A collection of Ceph monitors. - /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - [Newtonsoft.Json.JsonProperty("monitors", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Monitors { get; set; } - - /// The rados pool name. - /// Default is rbd. 
- /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("pool", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Pool { get; set; } - - /// ReadOnly here will force the ReadOnly setting in VolumeMounts. - /// Defaults to false. - /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - /// The rados user name. - /// Default is admin. - /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - /// +optional - [Newtonsoft.Json.JsonProperty("user", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string User { get; set; } - - - } - - /// ResourceFieldSelector represents container resources (cpu, memory) and their output format - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ResourceFieldSelector - { - /// Container name: required for volumes, optional for env vars - /// +optional - [Newtonsoft.Json.JsonProperty("containerName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ContainerName { get; set; } - - [Newtonsoft.Json.JsonProperty("divisor", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Divisor { get; set; } - - /// Required: resource to select - [Newtonsoft.Json.JsonProperty("resource", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Resource { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ResourceList : System.Collections.Generic.Dictionary - { - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ResourceRequirements - { - [Newtonsoft.Json.JsonProperty("limits", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceList Limits { get; set; } - - [Newtonsoft.Json.JsonProperty("requests", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ResourceList Requests { get; set; } - - - } - - /// SELinuxOptions are the labels to be applied to the container - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SELinuxOptions - { - /// Level is SELinux level label that applies to the container. - /// +optional - [Newtonsoft.Json.JsonProperty("level", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Level { get; set; } - - /// Role is a SELinux role label that applies to the container. 
- /// +optional - [Newtonsoft.Json.JsonProperty("role", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Role { get; set; } - - /// Type is a SELinux type label that applies to the container. - /// +optional - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Type { get; set; } - - /// User is a SELinux user label that applies to the container. - /// +optional - [Newtonsoft.Json.JsonProperty("user", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string User { get; set; } - - - } - - /// ScaleIOVolumeSource represents a persistent ScaleIO volume - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ScaleIOVolumeSource - { - /// Filesystem type to mount. - /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs", "ntfs". - /// Default is "xfs". - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// The host address of the ScaleIO API Gateway. - [Newtonsoft.Json.JsonProperty("gateway", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Gateway { get; set; } - - /// The name of the ScaleIO Protection Domain for the configured storage. - /// +optional - [Newtonsoft.Json.JsonProperty("protectionDomain", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ProtectionDomain { get; set; } - - /// Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - /// Flag to enable/disable SSL communication with Gateway, default false - /// +optional - [Newtonsoft.Json.JsonProperty("sslEnabled", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? SslEnabled { get; set; } - - /// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - /// Default is ThinProvisioned. - /// +optional - [Newtonsoft.Json.JsonProperty("storageMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string StorageMode { get; set; } - - /// The ScaleIO Storage Pool associated with the protection domain. - /// +optional - [Newtonsoft.Json.JsonProperty("storagePool", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string StoragePool { get; set; } - - /// The name of the storage system as configured in ScaleIO. 
- [Newtonsoft.Json.JsonProperty("system", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string System { get; set; } - - /// The name of a volume already created in the ScaleIO system - /// that is associated with this volume source. - [Newtonsoft.Json.JsonProperty("volumeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumeName { get; set; } - - - } - - /// Only one profile source may be set. - /// +union - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SeccompProfile - { - /// localhostProfile indicates a profile defined in a file on the node should be used. - /// The profile must be preconfigured on the node to work. - /// Must be a descending path, relative to the kubelet's configured seccomp profile location. - /// Must only be set if type is "Localhost". - /// +optional - [Newtonsoft.Json.JsonProperty("localhostProfile", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string LocalhostProfile { get; set; } - - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Type { get; set; } - - - } - - /// The contents of the target Secret's Data field will represent the - /// key-value pairs as environment variables. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SecretEnvSource - { - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the Secret must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - - } - - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SecretKeySelector - { - /// The key of the secret to select from. Must be a valid secret key. - [Newtonsoft.Json.JsonProperty("key", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Key { get; set; } - - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the Secret or its key must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? 
Optional { get; set; } - - - } - - /// The contents of the target Secret's Data field will be presented in a - /// projected volume as files using the keys in the Data field as the file names. - /// Note that this is identical to a secret volume source without the default - /// mode. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SecretProjection - { - /// If unspecified, each key-value pair in the Data field of the referenced - /// Secret will be projected into the volume as a file whose name is the - /// key and content is the value. If specified, the listed keys will be - /// projected into the specified paths, and unlisted keys will not be - /// present. If a key is specified which is not present in the Secret, - /// the volume setup will error unless it is marked optional. Paths must be - /// relative and may not contain the '..' path or start with '..'. - /// +optional - [Newtonsoft.Json.JsonProperty("items", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Items { get; set; } - - /// Name of the referent. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - /// TODO: Add other useful fields. apiVersion, kind, uid? - /// +optional - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Specify whether the Secret or its key must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - - } - - /// The contents of the target Secret's Data field will be presented in a volume - /// as files using the keys in the Data field as the file names. - /// Secret volumes support ownership management and SELinux relabeling. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SecretVolumeSource - { - /// Optional: mode bits used to set permissions on created files by default. - /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - /// YAML accepts both octal and decimal values, JSON requires decimal values - /// for mode bits. Defaults to 0644. - /// Directories within the path are not affected by this setting. - /// This might be in conflict with other options that affect the file - /// mode, like fsGroup, and the result can be other mode bits set. - /// +optional - [Newtonsoft.Json.JsonProperty("defaultMode", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? DefaultMode { get; set; } - - /// If unspecified, each key-value pair in the Data field of the referenced - /// Secret will be projected into the volume as a file whose name is the - /// key and content is the value. If specified, the listed keys will be - /// projected into the specified paths, and unlisted keys will not be - /// present. If a key is specified which is not present in the Secret, - /// the volume setup will error unless it is marked optional. Paths must be - /// relative and may not contain the '..' path or start with '..'. 
- /// +optional - [Newtonsoft.Json.JsonProperty("items", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Items { get; set; } - - /// Specify whether the Secret or its keys must be defined - /// +optional - [Newtonsoft.Json.JsonProperty("optional", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Optional { get; set; } - - /// Name of the secret in the pod's namespace to use. - /// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - /// +optional - [Newtonsoft.Json.JsonProperty("secretName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SecretName { get; set; } - - - } - - /// Some fields are present in both SecurityContext and PodSecurityContext. When both - /// are set, the values in SecurityContext take precedence. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SecurityContext - { - /// AllowPrivilegeEscalation controls whether a process can gain more - /// privileges than its parent process. This bool directly controls if - /// the no_new_privs flag will be set on the container process. - /// AllowPrivilegeEscalation is true always when the container is: - /// 1) run as Privileged - /// 2) has CAP_SYS_ADMIN - /// +optional - [Newtonsoft.Json.JsonProperty("allowPrivilegeEscalation", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? AllowPrivilegeEscalation { get; set; } - - [Newtonsoft.Json.JsonProperty("capabilities", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1Capabilities Capabilities { get; set; } - - /// Run container in privileged mode. - /// Processes in privileged containers are essentially equivalent to root on the host. - /// Defaults to false. - /// +optional - [Newtonsoft.Json.JsonProperty("privileged", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? Privileged { get; set; } - - [Newtonsoft.Json.JsonProperty("procMount", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ProcMount { get; set; } - - /// Whether this container has a read-only root filesystem. - /// Default is false. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnlyRootFilesystem", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnlyRootFilesystem { get; set; } - - /// The GID to run the entrypoint of the container process. - /// Uses runtime default if unset. - /// May also be set in PodSecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence. - /// +optional - [Newtonsoft.Json.JsonProperty("runAsGroup", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? RunAsGroup { get; set; } - - /// Indicates that the container must run as a non-root user. - /// If true, the Kubelet will validate the image at runtime to ensure that it - /// does not run as UID 0 (root) and fail to start the container if it does. 
- /// If unset or false, no such validation will be performed. - /// May also be set in PodSecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence. - /// +optional - [Newtonsoft.Json.JsonProperty("runAsNonRoot", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? RunAsNonRoot { get; set; } - - /// The UID to run the entrypoint of the container process. - /// Defaults to user specified in image metadata if unspecified. - /// May also be set in PodSecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence. - /// +optional - [Newtonsoft.Json.JsonProperty("runAsUser", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? RunAsUser { get; set; } - - [Newtonsoft.Json.JsonProperty("seLinuxOptions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SELinuxOptions SeLinuxOptions { get; set; } - - [Newtonsoft.Json.JsonProperty("seccompProfile", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SeccompProfile SeccompProfile { get; set; } - - [Newtonsoft.Json.JsonProperty("windowsOptions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1WindowsSecurityContextOptions WindowsOptions { get; set; } - - - } - - /// Service is a named abstraction of software service (for example, mysql) consisting of local port - /// (for example 3306) that the proxy listens on, and the selector that determines which pods - /// will answer requests sent through the proxy. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Service - { - [Newtonsoft.Json.JsonProperty("metadata", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ObjectMeta Metadata { get; set; } - - [Newtonsoft.Json.JsonProperty("spec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ServiceSpec Spec { get; set; } - - [Newtonsoft.Json.JsonProperty("status", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ServiceStatus Status { get; set; } - - - } - - /// ServiceAccountTokenProjection represents a projected service account token - /// volume. This projection can be used to insert a service account token into - /// the pods runtime filesystem for use against APIs (Kubernetes API Server or - /// otherwise). - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ServiceAccountTokenProjection - { - /// Audience is the intended audience of the token. A recipient of a token - /// must identify itself with an identifier specified in the audience of the - /// token, and otherwise should reject the token. The audience defaults to the - /// identifier of the apiserver. 
- /// +optional - [Newtonsoft.Json.JsonProperty("audience", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Audience { get; set; } - - /// ExpirationSeconds is the requested duration of validity of the service - /// account token. As the token approaches expiration, the kubelet volume - /// plugin will proactively rotate the service account token. The kubelet will - /// start trying to rotate the token if the token is older than 80 percent of - /// its time to live or if the token is older than 24 hours.Defaults to 1 hour - /// and must be at least 10 minutes. - /// +optional - [Newtonsoft.Json.JsonProperty("expirationSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? ExpirationSeconds { get; set; } - - /// Path is the path relative to the mount point of the file to project the - /// token into. - [Newtonsoft.Json.JsonProperty("path", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Path { get; set; } - - - } - - /// ServiceBackendPort is the service port being referenced. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ServiceBackendPort - { - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - [Newtonsoft.Json.JsonProperty("number", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Number { get; set; } - - - } - - /// ServicePort contains information on service's port. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ServicePort - { - [Newtonsoft.Json.JsonProperty("appProtocol", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string AppProtocol { get; set; } - - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - [Newtonsoft.Json.JsonProperty("nodePort", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? NodePort { get; set; } - - /// The port that will be exposed by this service. - [Newtonsoft.Json.JsonProperty("port", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? Port { get; set; } - - [Newtonsoft.Json.JsonProperty("protocol", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Protocol { get; set; } - - [Newtonsoft.Json.JsonProperty("targetPort", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public IntstrIntOrString TargetPort { get; set; } - - - } - - /// ServiceSpec describes the attributes that a user creates on a service. 
- [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ServiceSpec - { - [Newtonsoft.Json.JsonProperty("allocateLoadBalancerNodePorts", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? AllocateLoadBalancerNodePorts { get; set; } - - [Newtonsoft.Json.JsonProperty("clusterIP", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ClusterIP { get; set; } - - /// ClusterIPs is a list of IP addresses assigned to this service, and are - /// usually assigned randomly. If an address is specified manually, is - /// in-range (as per system configuration), and is not in use, it will be - /// allocated to the service; otherwise creation of the service will fail. - /// This field may not be changed through updates unless the type field is - /// also being changed to ExternalName (which requires this field to be - /// empty) or the type field is being changed from ExternalName (in which - /// case this field may optionally be specified, as describe above). Valid - /// values are "None", empty string (""), or a valid IP address. Setting - /// this to "None" makes a "headless service" (no virtual IP), which is - /// useful when direct endpoint connections are preferred and proxying is - /// not required. Only applies to types ClusterIP, NodePort, and - /// LoadBalancer. If this field is specified when creating a Service of type - /// ExternalName, creation will fail. This field will be wiped when updating - /// a Service to type ExternalName. If this field is not specified, it will - /// be initialized from the clusterIP field. If this field is specified, - /// clients must ensure that clusterIPs[0] and clusterIP have the same - /// value. - /// - /// Unless the "IPv6DualStack" feature gate is enabled, this field is - /// limited to one value, which must be the same as the clusterIP field. If - /// the feature gate is enabled, this field may hold a maximum of two - /// entries (dual-stack IPs, in either order). These IPs must correspond to - /// the values of the ipFamilies field. Both clusterIPs and ipFamilies are - /// governed by the ipFamilyPolicy field. - /// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - /// +listType=atomic - /// +optional - [Newtonsoft.Json.JsonProperty("clusterIPs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection ClusterIPs { get; set; } - - [Newtonsoft.Json.JsonProperty("externalIPs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection ExternalIPs { get; set; } - - [Newtonsoft.Json.JsonProperty("externalName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ExternalName { get; set; } - - [Newtonsoft.Json.JsonProperty("externalTrafficPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ExternalTrafficPolicy { get; set; } - - [Newtonsoft.Json.JsonProperty("healthCheckNodePort", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? 
HealthCheckNodePort { get; set; } - - [Newtonsoft.Json.JsonProperty("internalTrafficPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string InternalTrafficPolicy { get; set; } - - /// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - /// service, and is gated by the "IPv6DualStack" feature gate. This field - /// is usually assigned automatically based on cluster configuration and the - /// ipFamilyPolicy field. If this field is specified manually, the requested - /// family is available in the cluster, and ipFamilyPolicy allows it, it - /// will be used; otherwise creation of the service will fail. This field - /// is conditionally mutable: it allows for adding or removing a secondary - /// IP family, but it does not allow changing the primary IP family of the - /// Service. Valid values are "IPv4" and "IPv6". This field only applies - /// to Services of types ClusterIP, NodePort, and LoadBalancer, and does - /// apply to "headless" services. This field will be wiped when updating a - /// Service to type ExternalName. - /// - /// This field may hold a maximum of two entries (dual-stack families, in - /// either order). These families must correspond to the values of the - /// clusterIPs field, if specified. Both clusterIPs and ipFamilies are - /// governed by the ipFamilyPolicy field. - /// +listType=atomic - /// +optional - [Newtonsoft.Json.JsonProperty("ipFamilies", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection IpFamilies { get; set; } - - [Newtonsoft.Json.JsonProperty("ipFamilyPolicy", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string IpFamilyPolicy { get; set; } - - [Newtonsoft.Json.JsonProperty("loadBalancerClass", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string LoadBalancerClass { get; set; } - - [Newtonsoft.Json.JsonProperty("loadBalancerIP", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string LoadBalancerIP { get; set; } - - [Newtonsoft.Json.JsonProperty("loadBalancerSourceRanges", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection LoadBalancerSourceRanges { get; set; } - - [Newtonsoft.Json.JsonProperty("ports", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Ports { get; set; } - - [Newtonsoft.Json.JsonProperty("publishNotReadyAddresses", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? 
PublishNotReadyAddresses { get; set; } - - [Newtonsoft.Json.JsonProperty("selector", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.IDictionary Selector { get; set; } - - [Newtonsoft.Json.JsonProperty("sessionAffinity", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SessionAffinity { get; set; } - - [Newtonsoft.Json.JsonProperty("sessionAffinityConfig", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SessionAffinityConfig SessionAffinityConfig { get; set; } - - [Newtonsoft.Json.JsonProperty("type", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Type { get; set; } - - - } - - /// ServiceStatus represents the current status of a service. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1ServiceStatus - { - [Newtonsoft.Json.JsonProperty("conditions", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public System.Collections.Generic.ICollection Conditions { get; set; } - - [Newtonsoft.Json.JsonProperty("loadBalancer", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LoadBalancerStatus LoadBalancer { get; set; } - - - } - - /// SessionAffinityConfig represents the configurations of session affinity. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1SessionAffinityConfig - { - [Newtonsoft.Json.JsonProperty("clientIP", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ClientIPConfig ClientIP { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1StorageOSVolumeSource - { - /// Filesystem type to mount. - /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Defaults to false (read/write). ReadOnly here will force - /// the ReadOnly setting in VolumeMounts. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - [Newtonsoft.Json.JsonProperty("secretRef", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LocalObjectReference SecretRef { get; set; } - - /// VolumeName is the human-readable name of the StorageOS volume. Volume - /// names are only unique within a namespace. - [Newtonsoft.Json.JsonProperty("volumeName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumeName { get; set; } - - /// VolumeNamespace specifies the scope of the volume within StorageOS. If no - /// namespace is specified then the Pod's namespace will be used. 
This allows the - /// Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - /// Set VolumeName to any name to override the default behaviour. - /// Set to "default" if you are not using namespaces within StorageOS. - /// Namespaces that do not pre-exist within StorageOS will be created. - /// +optional - [Newtonsoft.Json.JsonProperty("volumeNamespace", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumeNamespace { get; set; } - - - } - - /// Sysctl defines a kernel parameter to be set - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Sysctl - { - /// Name of a property to set - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Value of a property to set - [Newtonsoft.Json.JsonProperty("value", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Value { get; set; } - - - } - - /// TCPSocketAction describes an action based on opening a socket - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1TCPSocketAction - { - /// Optional: Host name to connect to, defaults to the pod IP. - /// +optional - [Newtonsoft.Json.JsonProperty("host", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Host { get; set; } - - [Newtonsoft.Json.JsonProperty("port", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public IntstrIntOrString Port { get; set; } - - - } - - /// The pod this Toleration is attached to tolerates any taint that matches - /// the triple <key,value,effect> using the matching operator <operator>. - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Toleration - { - [Newtonsoft.Json.JsonProperty("effect", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Effect { get; set; } - - /// Key is the taint key that the toleration applies to. Empty means match all taint keys. - /// If the key is empty, operator must be Exists; this combination means to match all values and all keys. - /// +optional - [Newtonsoft.Json.JsonProperty("key", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Key { get; set; } - - [Newtonsoft.Json.JsonProperty("operator", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Operator { get; set; } - - /// TolerationSeconds represents the period of time the toleration (which must be - /// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - /// it is not set, which means tolerate the taint forever (do not evict). Zero and - /// negative values will be treated as 0 (evict immediately) by the system. - /// +optional - [Newtonsoft.Json.JsonProperty("tolerationSeconds", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public long? 
TolerationSeconds { get; set; } - - /// Value is the taint value the toleration matches to. - /// If the operator is Exists, the value should be empty, otherwise just a regular string. - /// +optional - [Newtonsoft.Json.JsonProperty("value", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Value { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1TopologySpreadConstraint - { - [Newtonsoft.Json.JsonProperty("labelSelector", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1LabelSelector LabelSelector { get; set; } - - /// MaxSkew describes the degree to which pods may be unevenly distributed. - /// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - /// between the number of matching pods in the target topology and the global minimum. - /// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - /// labelSelector spread as 1/1/0: - /// +-------+-------+-------+ - /// zone1 | zone2 | zone3 | - /// +-------+-------+-------+ - /// P | P | | - /// +-------+-------+-------+ - /// if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; - /// scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) - /// violate MaxSkew(1). - /// if MaxSkew is 2, incoming pod can be scheduled onto any zone. - /// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - /// to topologies that satisfy it. - /// It's a required field. Default value is 1 and 0 is not allowed. - [Newtonsoft.Json.JsonProperty("maxSkew", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? MaxSkew { get; set; } - - /// TopologyKey is the key of node labels. Nodes that have a label with this key - /// and identical values are considered to be in the same topology. - /// We consider each <key, value> as a "bucket", and try to put balanced number - /// of pods into each bucket. - /// It's a required field. - [Newtonsoft.Json.JsonProperty("topologyKey", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string TopologyKey { get; set; } - - [Newtonsoft.Json.JsonProperty("whenUnsatisfiable", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string WhenUnsatisfiable { get; set; } - - - } - - /// TypedLocalObjectReference contains enough information to let you locate the - /// typed referenced object inside the same namespace. - /// +structType=atomic - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1TypedLocalObjectReference - { - /// APIGroup is the group for the resource being referenced. - /// If APIGroup is not specified, the specified Kind must be in the core API group. - /// For any other third-party types, APIGroup is required. 
- /// +optional - [Newtonsoft.Json.JsonProperty("apiGroup", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string ApiGroup { get; set; } - - /// Kind is the type of resource being referenced - [Newtonsoft.Json.JsonProperty("kind", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Kind { get; set; } - - /// Name is the name of resource being referenced - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1Volume - { - [Newtonsoft.Json.JsonProperty("awsElasticBlockStore", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1AWSElasticBlockStoreVolumeSource AwsElasticBlockStore { get; set; } - - [Newtonsoft.Json.JsonProperty("azureDisk", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1AzureDiskVolumeSource AzureDisk { get; set; } - - [Newtonsoft.Json.JsonProperty("azureFile", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1AzureFileVolumeSource AzureFile { get; set; } - - [Newtonsoft.Json.JsonProperty("cephfs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1CephFSVolumeSource Cephfs { get; set; } - - [Newtonsoft.Json.JsonProperty("cinder", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1CinderVolumeSource Cinder { get; set; } - - [Newtonsoft.Json.JsonProperty("configMap", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ConfigMapVolumeSource ConfigMap { get; set; } - - [Newtonsoft.Json.JsonProperty("csi", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1CSIVolumeSource Csi { get; set; } - - [Newtonsoft.Json.JsonProperty("downwardAPI", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1DownwardAPIVolumeSource DownwardAPI { get; set; } - - [Newtonsoft.Json.JsonProperty("emptyDir", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1EmptyDirVolumeSource EmptyDir { get; set; } - - [Newtonsoft.Json.JsonProperty("ephemeral", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1EphemeralVolumeSource Ephemeral { get; set; } - - [Newtonsoft.Json.JsonProperty("fc", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1FCVolumeSource Fc { get; set; } - - [Newtonsoft.Json.JsonProperty("flexVolume", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1FlexVolumeSource FlexVolume { get; set; } - - [Newtonsoft.Json.JsonProperty("flocker", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1FlockerVolumeSource Flocker { get; set; } - - 
[Newtonsoft.Json.JsonProperty("gcePersistentDisk", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1GCEPersistentDiskVolumeSource GcePersistentDisk { get; set; } - - [Newtonsoft.Json.JsonProperty("gitRepo", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1GitRepoVolumeSource GitRepo { get; set; } - - [Newtonsoft.Json.JsonProperty("glusterfs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1GlusterfsVolumeSource Glusterfs { get; set; } - - [Newtonsoft.Json.JsonProperty("hostPath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1HostPathVolumeSource HostPath { get; set; } - - [Newtonsoft.Json.JsonProperty("iscsi", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ISCSIVolumeSource Iscsi { get; set; } - - /// Volume's name. - /// Must be a DNS_LABEL and unique within the pod. - /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - [Newtonsoft.Json.JsonProperty("nfs", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1NFSVolumeSource Nfs { get; set; } - - [Newtonsoft.Json.JsonProperty("persistentVolumeClaim", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PersistentVolumeClaimVolumeSource PersistentVolumeClaim { get; set; } - - [Newtonsoft.Json.JsonProperty("photonPersistentDisk", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PhotonPersistentDiskVolumeSource PhotonPersistentDisk { get; set; } - - [Newtonsoft.Json.JsonProperty("portworxVolume", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PortworxVolumeSource PortworxVolume { get; set; } - - [Newtonsoft.Json.JsonProperty("projected", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ProjectedVolumeSource Projected { get; set; } - - [Newtonsoft.Json.JsonProperty("quobyte", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1QuobyteVolumeSource Quobyte { get; set; } - - [Newtonsoft.Json.JsonProperty("rbd", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1RBDVolumeSource Rbd { get; set; } - - [Newtonsoft.Json.JsonProperty("scaleIO", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ScaleIOVolumeSource ScaleIO { get; set; } - - [Newtonsoft.Json.JsonProperty("secret", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SecretVolumeSource Secret { get; set; } - - [Newtonsoft.Json.JsonProperty("storageos", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1StorageOSVolumeSource Storageos { get; set; } 
- - [Newtonsoft.Json.JsonProperty("vsphereVolume", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1VsphereVirtualDiskVolumeSource VsphereVolume { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1VolumeDevice - { - /// devicePath is the path inside of the container that the device will be mapped to. - [Newtonsoft.Json.JsonProperty("devicePath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string DevicePath { get; set; } - - /// name must match the name of a persistentVolumeClaim in the pod - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1VolumeMount - { - /// Path within the container at which the volume should be mounted. Must - /// not contain ':'. - [Newtonsoft.Json.JsonProperty("mountPath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string MountPath { get; set; } - - [Newtonsoft.Json.JsonProperty("mountPropagation", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string MountPropagation { get; set; } - - /// This must match the Name of a Volume. - [Newtonsoft.Json.JsonProperty("name", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string Name { get; set; } - - /// Mounted read-only if true, read-write otherwise (false or unspecified). - /// Defaults to false. - /// +optional - [Newtonsoft.Json.JsonProperty("readOnly", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? ReadOnly { get; set; } - - /// Path within the volume from which the container's volume should be mounted. - /// Defaults to "" (volume's root). - /// +optional - [Newtonsoft.Json.JsonProperty("subPath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SubPath { get; set; } - - /// Expanded path within the volume from which the container's volume should be mounted. - /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - /// Defaults to "" (volume's root). - /// SubPathExpr and SubPath are mutually exclusive. 
- /// +optional - [Newtonsoft.Json.JsonProperty("subPathExpr", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string SubPathExpr { get; set; } - - - } - - /// Projection that may be projected along with other supported volume types - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1VolumeProjection - { - [Newtonsoft.Json.JsonProperty("configMap", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ConfigMapProjection ConfigMap { get; set; } - - [Newtonsoft.Json.JsonProperty("downwardAPI", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1DownwardAPIProjection DownwardAPI { get; set; } - - [Newtonsoft.Json.JsonProperty("secret", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1SecretProjection Secret { get; set; } - - [Newtonsoft.Json.JsonProperty("serviceAccountToken", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1ServiceAccountTokenProjection ServiceAccountToken { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1VsphereVirtualDiskVolumeSource - { - /// Filesystem type to mount. - /// Must be a filesystem type supported by the host operating system. - /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - /// +optional - [Newtonsoft.Json.JsonProperty("fsType", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string FsType { get; set; } - - /// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - /// +optional - [Newtonsoft.Json.JsonProperty("storagePolicyID", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string StoragePolicyID { get; set; } - - /// Storage Policy Based Management (SPBM) profile name. - /// +optional - [Newtonsoft.Json.JsonProperty("storagePolicyName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string StoragePolicyName { get; set; } - - /// Path that identifies vSphere volume vmdk - [Newtonsoft.Json.JsonProperty("volumePath", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string VolumePath { get; set; } - - - } - - /// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1WeightedPodAffinityTerm - { - [Newtonsoft.Json.JsonProperty("podAffinityTerm", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public V1PodAffinityTerm PodAffinityTerm { get; set; } - - /// weight associated with matching the corresponding podAffinityTerm, - /// in the range 1-100. - [Newtonsoft.Json.JsonProperty("weight", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public int? 
Weight { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class V1WindowsSecurityContextOptions - { - /// GMSACredentialSpec is where the GMSA admission webhook - /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - /// GMSA credential spec named by the GMSACredentialSpecName field. - /// +optional - [Newtonsoft.Json.JsonProperty("gmsaCredentialSpec", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string GmsaCredentialSpec { get; set; } - - /// GMSACredentialSpecName is the name of the GMSA credential spec to use. - /// +optional - [Newtonsoft.Json.JsonProperty("gmsaCredentialSpecName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string GmsaCredentialSpecName { get; set; } - - /// HostProcess determines if a container should be run as a 'Host Process' container. - /// This field is alpha-level and will only be honored by components that enable the - /// WindowsHostProcessContainers feature flag. Setting this field without the feature - /// flag will result in errors when validating the Pod. All of a Pod's containers must - /// have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - /// containers and non-HostProcess containers). In addition, if HostProcess is true - /// then HostNetwork must also be set to true. - /// +optional - [Newtonsoft.Json.JsonProperty("hostProcess", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public bool? HostProcess { get; set; } - - /// The UserName in Windows to run the entrypoint of the container process. - /// Defaults to the user specified in image metadata if unspecified. - /// May also be set in PodSecurityContext. If set in both SecurityContext and - /// PodSecurityContext, the value specified in SecurityContext takes precedence. 
- /// +optional - [Newtonsoft.Json.JsonProperty("runAsUserName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public string RunAsUserName { get; set; } - - - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] - public partial class Response - { - [Newtonsoft.Json.JsonProperty("error", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public RuntimeStreamError Error { get; set; } - - [Newtonsoft.Json.JsonProperty("result", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] - public ApiStreamingQueueMessage Result { get; set; } - - - } - - public partial class FileResponse : System.IDisposable - { - private System.IDisposable _client; - private System.IDisposable _response; - - public int StatusCode { get; private set; } - - public System.Collections.Generic.IReadOnlyDictionary> Headers { get; private set; } - - public System.IO.Stream Stream { get; private set; } - - public bool IsPartial - { - get { return StatusCode == 206; } - } - - public FileResponse(int statusCode, System.Collections.Generic.IReadOnlyDictionary> headers, System.IO.Stream stream, System.IDisposable client, System.IDisposable response) - { - StatusCode = statusCode; - Headers = headers; - Stream = stream; - _client = client; - _response = response; - } - - public void Dispose() - { - if (Stream != null) - Stream.Dispose(); - if (_response != null) - _response.Dispose(); - if (_client != null) - _client.Dispose(); - } - } - - [System.CodeDom.Compiler.GeneratedCode("NSwag", "13.1.3.0 (NJsonSchema v10.0.27.0 (Newtonsoft.Json v12.0.0.0))")] - public partial class ApiException : System.Exception - { - public int StatusCode { get; private set; } - - public string Response { get; private set; } - - public System.Collections.Generic.IReadOnlyDictionary> Headers { get; private set; } - - public ApiException(string message, int statusCode, string response, System.Collections.Generic.IReadOnlyDictionary> headers, System.Exception innerException) - : base(message + "\n\nStatus: " + statusCode + "\nResponse: \n" + response.Substring(0, response.Length >= 512 ? 
512 : response.Length), innerException) - { - StatusCode = statusCode; - Response = response; - Headers = headers; - } - - public override string ToString() - { - return string.Format("HTTP Response: \n\n{0}\n\n{1}", Response, base.ToString()); - } - } - - [System.CodeDom.Compiler.GeneratedCode("NSwag", "13.1.3.0 (NJsonSchema v10.0.27.0 (Newtonsoft.Json v12.0.0.0))")] - public partial class ApiException : ApiException - { - public TResult Result { get; private set; } - - public ApiException(string message, int statusCode, string response, System.Collections.Generic.IReadOnlyDictionary> headers, TResult result, System.Exception innerException) - : base(message, statusCode, response, headers, innerException) - { - Result = result; - } - } - -} - -#pragma warning restore 1591 -#pragma warning restore 1573 -#pragma warning restore 472 -#pragma warning restore 114 -#pragma warning restore 108 \ No newline at end of file diff --git a/client/python/armada_client/__init__.py b/client/python/armada_client/__init__.py index e69de29bb2d..c1e297ad39a 100644 --- a/client/python/armada_client/__init__.py +++ b/client/python/armada_client/__init__.py @@ -0,0 +1,14 @@ +try: + from .typings import JobState + from ._proto_methods import is_active, is_terminal + + JobState.is_active = is_active + JobState.is_terminal = is_terminal + + del is_active, is_terminal, JobState +except ImportError: + """ + Import errors occur during proto generation, where certain + modules import types that don't exist yet. We can safely ignore these failures + """ + pass diff --git a/client/python/armada_client/_proto_methods.py b/client/python/armada_client/_proto_methods.py new file mode 100644 index 00000000000..ab8bd65fbf6 --- /dev/null +++ b/client/python/armada_client/_proto_methods.py @@ -0,0 +1,38 @@ +from armada_client.typings import JobState + + +def is_terminal(self) -> bool: + """ + Determines if a job state is terminal. + + Terminal states indicate that a job has completed its lifecycle, + whether successfully or due to failure. + + :param state: The current state of the job. + :type state: JobState + + :returns: True if the job state is terminal, False if it is active. + :rtype: bool + """ + terminal_states = { + JobState.SUCCEEDED, + JobState.FAILED, + JobState.CANCELLED, + JobState.PREEMPTED, + } + return self in terminal_states + + +def is_active(self) -> bool: + """ + Determines if a job state is active. + + Active states indicate that a job is still running or in a non-terminal state. + + :param state: The current state of the job. + :type state: JobState + + :returns: True if the job state is active, False if it is terminal. 
+ :rtype: bool + """ + return not is_terminal(self) diff --git a/client/python/armada_client/asyncio_client.py b/client/python/armada_client/asyncio_client.py index 333575f35f1..301e7923445 100644 --- a/client/python/armada_client/asyncio_client.py +++ b/client/python/armada_client/asyncio_client.py @@ -5,6 +5,8 @@ https://armadaproject.io/api """ +from datetime import timedelta +import logging from typing import Dict, List, Optional, AsyncIterator import grpc @@ -16,11 +18,75 @@ submit_pb2, submit_pb2_grpc, health_pb2, + job_pb2, + job_pb2_grpc, ) from armada_client.event import Event from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1 from armada_client.permissions import Permissions from armada_client.typings import JobState +from armada_client.iterators import AsyncTimeoutIterator, IteratorTimeoutException + + +class _AsyncResilientArmadaEventStream(AsyncIterator[event_pb2.EventStreamMessage]): + def __init__( + self, + *, + queue: str, + job_set_id: str, + from_message_id: Optional[str] = None, + event_stub: event_pb2_grpc.EventStub, + event_timeout: timedelta, + ): + self._queue = queue + self._job_set_id = job_set_id + self._last_message_id = from_message_id or "" + self._stream = None + self._cancelled = False + self._event_stub = event_stub + self._event_timeout = event_timeout + self._timeout_iterator = None + + def __aiter__(self) -> AsyncIterator[event_pb2.EventStreamMessage]: + return self + + async def __anext__(self) -> event_pb2.EventStreamMessage: + while True: + if self._cancelled: + raise StopAsyncIteration() + if self._timeout_iterator is None: + self._timeout_iterator = self._re_connect() + try: + # we can't use anext here, as it requires python 3.10+ + message = await self._timeout_iterator.__anext__() + self._last_message_id = message.id + return message + except IteratorTimeoutException: + self._timeout_iterator = None + + def _re_connect(self): + self._close_connection() + jsr = event_pb2.JobSetRequest( + queue=self._queue, + id=self._job_set_id, + from_message_id=self._last_message_id, + watch=True, + errorIfMissing=True, + ) + self._stream = self._event_stub.GetJobSetEvents(jsr) + return AsyncTimeoutIterator(self._stream, timeout=self._event_timeout) + + def _close_connection(self): + if self._stream is not None: + self._stream.cancel() + self._stream = None + + def cancel(self): + self._cancelled = True + self._close_connection() + + +logger = logging.getLogger("armada_client.asyncio_client") class ArmadaAsyncIOClient: @@ -33,9 +99,15 @@ class ArmadaAsyncIOClient: :return: an Armada client instance """ - def __init__(self, channel: grpc.aio.Channel) -> None: + def __init__( + self, + channel: grpc.aio.Channel, + event_timeout: timedelta = timedelta(minutes=15), + ) -> None: self.submit_stub = submit_pb2_grpc.SubmitStub(channel) self.event_stub = event_pb2_grpc.EventStub(channel) + self.job_stub = job_pb2_grpc.JobsStub(channel) + self.event_timeout = event_timeout async def get_job_events_stream( self, @@ -62,19 +134,13 @@ async def get_job_events_stream( :param from_message_id: The from message id. :return: A job events stream for the job_set_id provided. 
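As an aside, a minimal sketch of the JobState helpers patched in above (assuming the package import has run so `is_active`/`is_terminal` are attached to the enum; not part of the diff itself):

```python
from armada_client.typings import JobState

state = JobState.SUCCEEDED
# SUCCEEDED, FAILED, CANCELLED and PREEMPTED are the terminal states.
assert state.is_terminal()
# is_active is simply the negation of is_terminal.
assert not state.is_active()
```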
""" - - if from_message_id is None: - from_message_id = "" - - jsr = event_pb2.JobSetRequest( + return _AsyncResilientArmadaEventStream( queue=queue, - id=job_set_id, + job_set_id=job_set_id, from_message_id=from_message_id, - watch=True, - errorIfMissing=True, + event_stub=self.event_stub, + event_timeout=self.event_timeout, ) - response = self.event_stub.GetJobSetEvents(jsr) - return response @staticmethod def unmarshal_event_response(event: event_pb2.EventStreamMessage) -> Event: @@ -106,7 +172,7 @@ async def event_health(self) -> health_pb2.HealthCheckResponse: async def submit_jobs( self, queue: str, job_set_id: str, job_request_items ) -> AsyncIterator[submit_pb2.JobSubmitResponse]: - """Submit a armada job. + """Submit an armada job. Uses SubmitJobs RPC to submit a job. @@ -122,38 +188,77 @@ async def submit_jobs( response = await self.submit_stub.SubmitJobs(request) return response + async def get_job_status(self, job_ids: List[str]) -> job_pb2.JobStatusResponse: + """ + Asynchronously retrieves the status of a list of jobs from Armada. + + :param job_ids: A list of unique job identifiers. + :type job_ids: List[str] + + :returns: The response from the server containing the job status. + :rtype: JobStatusResponse + """ + req = job_pb2.JobStatusRequest(job_ids=job_ids) + resp = await self.job_stub.GetJobStatus(req) + return resp + + async def get_job_details(self, job_ids: List[str]) -> job_pb2.JobDetailsResponse: + """ + Asynchronously retrieves the details of a job from Armada. + + :param job_ids: A list of unique job identifiers. + :type job_ids: List[str] + + :returns: The Armada job details response. + """ + req = job_pb2.JobDetailsRequest(job_ids=job_ids, expand_job_run=True) + resp = await self.job_stub.GetJobDetails(req) + return resp + + async def get_job_run_details( + self, run_ids: List[str] + ) -> job_pb2.JobRunDetailsResponse: + """ + Asynchronously retrieves the details of a job run from Armada. + + :param run_ids: A list of unique job run identifiers. + :type run_ids: List[str] + + :returns: The Armada run details response. + """ + req = job_pb2.JobRunDetailsRequest(run_ids=run_ids) + resp = await self.job_stub.GetJobRunDetails(req) + return resp + async def cancel_jobs( self, - queue: Optional[str] = None, + queue: str, + job_set_id: str, job_id: Optional[str] = None, - job_set_id: Optional[str] = None, ) -> submit_pb2.CancellationResult: """Cancel jobs in a given queue. - Uses the CancelJobs RPC to cancel jobs. Either job_id or - job_set_id is required. + Uses the CancelJobs RPC to cancel jobs. :param queue: The name of the queue - :param job_id: The name of the job id (this or job_set_id required) - :param job_set_id: An array of JobSubmitRequestItems. (this or job_id required) + :param job_set_id: The name of the job set id + :param job_id: The name of the job id (optional), if empty - cancel all jobs :return: A CancellationResult object. """ + if not queue or not job_set_id: + raise ValueError("Both queue and job_set_id must be provided.") - # Checks to ensure that either job_id is provided, - # or job_set_id AND queue is provided. - # ensure that the others have appropriate empty values. 
- - if job_id and not queue and not job_set_id: - request = submit_pb2.JobCancelRequest(job_id=job_id) - - elif job_set_id and queue and not job_id: - request = submit_pb2.JobCancelRequest(queue=queue, job_set_id=job_set_id) - + if job_id and queue and job_set_id: + request = submit_pb2.JobCancelRequest( + queue=queue, job_set_id=job_set_id, job_id=job_id + ) + return await self.submit_stub.CancelJobs(request) else: - raise ValueError("Either job_id or job_set_id and queue must be provided.") - - response = await self.submit_stub.CancelJobs(request) - return response + logger.warning( + "cancelling all jobs within a jobset via cancel_jobs is deprecated. " + "Use cancel_jobset instead." + ) + return await self.cancel_jobset(queue, job_set_id, []) # type: ignore async def cancel_jobset( self, @@ -186,14 +291,14 @@ async def cancel_jobset( async def reprioritize_jobs( self, new_priority: float, - job_ids: Optional[List[str]] = None, - job_set_id: Optional[str] = None, - queue: Optional[str] = None, + job_ids: Optional[List[str]], + job_set_id: str, + queue: str, ) -> submit_pb2.JobReprioritizeResponse: """Reprioritize jobs with new_priority value. Uses ReprioritizeJobs RPC to set a new priority on a list of jobs - or job set. + or job set (if job_ids are set to None or empty). :param new_priority: The new priority value for the jobs :param job_ids: A list of job ids to change priority of @@ -201,27 +306,48 @@ async def reprioritize_jobs( :param queue: The queue the jobs are in :return: JobReprioritizeResponse object. It is a map of strings. """ + if not queue or not job_set_id: + raise ValueError("Both queue and job_set_id must be provided.") - # Same as in cancel_jobs, ensure that either - # job_ids or job_set_id and queue is provided. - - if job_ids and not job_set_id and not queue: + if job_ids: request = submit_pb2.JobReprioritizeRequest( + queue=queue, + job_set_id=job_set_id, job_ids=job_ids, new_priority=new_priority, ) - elif job_set_id and queue and not job_ids: + else: request = submit_pb2.JobReprioritizeRequest( - job_set_id=job_set_id, queue=queue, + job_set_id=job_set_id, new_priority=new_priority, ) - else: - raise ValueError("Either job_ids or job_set_id and queue must be provided.") + return await self.submit_stub.ReprioritizeJobs(request) + + async def preempt_jobs( + self, + queue: str, + job_set_id: str, + job_id: str, + ) -> empty_pb2.Empty: + """Preempt jobs in a given queue. + + Uses the PreemptJobs RPC to preempt jobs. + + :param queue: The name of the queue + :param job_set_id: The name of the job set id + :param job_id: The id the job + :return: An empty response. 
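The tightened cancel/reprioritize signatures above might be driven like this (a sketch mirroring the updated unit tests; queue, job-set and job ids are placeholders):

```python
import asyncio

import grpc

from armada_client.asyncio_client import ArmadaAsyncIOClient


async def main():
    channel = grpc.aio.insecure_channel("localhost:50051")
    client = ArmadaAsyncIOClient(channel)

    # queue and job_set_id are now mandatory; job_id narrows the call to one job,
    # while omitting it falls back to cancel_jobset (with a deprecation warning).
    result = await client.cancel_jobs(queue="test", job_set_id="job-set-1", job_id="job-1")
    print(result.cancelled_ids)

    # Passing job_ids=None (or an empty list) reprioritizes the whole job set instead.
    await client.reprioritize_jobs(
        new_priority=2, job_ids=["job-2"], job_set_id="job-set-1", queue="test"
    )


asyncio.run(main())
```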
+ """ + if not queue or not job_set_id or not job_id: + raise ValueError("All of queue, job_set_id and job_id must be provided.") - response = await self.submit_stub.ReprioritizeJobs(request) + request = submit_pb2.JobPreemptRequest( + queue=queue, job_set_id=job_set_id, job_ids=[job_id] + ) + response = await self.submit_stub.PreemptJobs(request) return response async def create_queue(self, queue: submit_pb2.Queue) -> empty_pb2.Empty: diff --git a/client/python/armada_client/client.py b/client/python/armada_client/client.py index 9a39e225893..1cf36c7c2ed 100644 --- a/client/python/armada_client/client.py +++ b/client/python/armada_client/client.py @@ -5,6 +5,8 @@ https://armadaproject.io/api """ +from datetime import timedelta +import logging from typing import Dict, Iterator, List, Optional from google.protobuf import empty_pb2 @@ -15,11 +17,77 @@ submit_pb2, submit_pb2_grpc, health_pb2, + job_pb2, + job_pb2_grpc, ) from armada_client.event import Event from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1 from armada_client.permissions import Permissions from armada_client.typings import JobState +from armada_client.iterators import ( + IteratorTimeoutException, + TimeoutIterator, +) + + +class _ResilientArmadaEventStream(Iterator[event_pb2.EventStreamMessage]): + def __init__( + self, + *, + queue: str, + job_set_id: str, + from_message_id: Optional[str] = None, + event_stub: event_pb2_grpc.EventStub, + event_timeout: timedelta, + ): + self._queue = queue + self._job_set_id = job_set_id + self._last_message_id = from_message_id or "" + self._stream = None + self._cancelled = False + self._event_stub = event_stub + self._event_timeout = event_timeout + self._timeout_iterator = None + + def __iter__(self) -> Iterator[event_pb2.EventStreamMessage]: + return self + + def __next__(self) -> event_pb2.EventStreamMessage: + while True: + if self._cancelled: + raise StopIteration() + if self._timeout_iterator is None: + self._timeout_iterator = self._re_connect() + try: + message = next(self._timeout_iterator) + self._last_message_id = message.id + return message + except IteratorTimeoutException: + self._timeout_iterator = None + + def _re_connect(self): + self._close_connection() + jsr = event_pb2.JobSetRequest( + queue=self._queue, + id=self._job_set_id, + from_message_id=self._last_message_id, + watch=True, + errorIfMissing=True, + ) + self._stream = self._event_stub.GetJobSetEvents(jsr) + return TimeoutIterator(self._stream, timeout=self._event_timeout) + + def _close_connection(self): + if self._stream is not None: + self._stream.cancel() + self._stream = None + + def cancel(self): + self._cancelled = True + self._close_connection() + + +logger = logging.getLogger("armada_client.asyncio_client") class ArmadaClient: @@ -32,9 +100,11 @@ class ArmadaClient: :return: an Armada client instance """ - def __init__(self, channel): + def __init__(self, channel, event_timeout: timedelta = timedelta(minutes=15)): self.submit_stub = submit_pb2_grpc.SubmitStub(channel) self.event_stub = event_pb2_grpc.EventStub(channel) + self.event_timeout = event_timeout + self.job_stub = job_pb2_grpc.JobsStub(channel) def get_job_events_stream( self, @@ -61,18 +131,13 @@ def get_job_events_stream( :param from_message_id: The from message id. :return: A job events stream for the job_set_id provided. 
""" - - if from_message_id is None: - from_message_id = "" - - jsr = event_pb2.JobSetRequest( + return _ResilientArmadaEventStream( queue=queue, - id=job_set_id, + job_set_id=job_set_id, from_message_id=from_message_id, - watch=True, - errorIfMissing=True, + event_stub=self.event_stub, + event_timeout=self.event_timeout, ) - return self.event_stub.GetJobSetEvents(jsr) @staticmethod def unmarshal_event_response(event: event_pb2.EventStreamMessage) -> Event: @@ -99,10 +164,47 @@ def event_health(self) -> health_pb2.HealthCheckResponse: """ return self.event_stub.Health(request=empty_pb2.Empty()) + def get_job_status(self, job_ids: List[str]) -> job_pb2.JobStatusResponse: + """ + Retrieves the status of a list of jobs from Armada. + + :param job_ids: A list of unique job identifiers. + :type job_ids: List[str] + + :returns: The response from the server containing the job status. + :rtype: JobStatusResponse + """ + req = job_pb2.JobStatusRequest(job_ids=job_ids) + return self.job_stub.GetJobStatus(req) + + def get_job_details(self, job_ids: List[str]) -> job_pb2.JobDetailsResponse: + """ + Retrieves the details of a job from Armada. + + :param job_ids: A list of unique job identifiers. + :type job_ids: List[str] + + :returns: The Armada job details response. + """ + req = job_pb2.JobDetailsRequest(job_ids=job_ids, expand_job_run=True) + return self.job_stub.GetJobDetails(req) + + def get_job_run_details(self, run_ids: List[str]) -> job_pb2.JobRunDetailsResponse: + """ + Retrieves the details of a job run from Armada. + + :param run_ids: A list of unique job run identifiers. + :type run_ids: List[str] + + :returns: The Armada run details response. + """ + req = job_pb2.JobRunDetailsRequest(run_ids=run_ids) + return self.job_stub.GetJobRunDetails(req) + def submit_jobs( self, queue: str, job_set_id: str, job_request_items ) -> submit_pb2.JobSubmitResponse: - """Submit a armada job. + """Submit an armada job. Uses SubmitJobs RPC to submit a job. @@ -120,36 +222,33 @@ def submit_jobs( def cancel_jobs( self, - queue: Optional[str] = None, + queue: str, + job_set_id: str, job_id: Optional[str] = None, - job_set_id: Optional[str] = None, ) -> submit_pb2.CancellationResult: """Cancel jobs in a given queue. - Uses the CancelJobs RPC to cancel jobs. Either job_id or - job_set_id is required. + Uses the CancelJobs RPC to cancel jobs. :param queue: The name of the queue - :param job_id: The name of the job id (this or job_set_id required) - :param job_set_id: An array of JobSubmitRequestItems. (this or job_id required) + :param job_set_id: The name of the job set id + :param job_id: The name of the job id (optional), if empty - cancel all jobs :return: A CancellationResult object. """ + if not queue or not job_set_id: + raise ValueError("Both queue and job_set_id must be provided.") - # Checks to ensure that either job_id is provided, - # or job_set_id AND queue is provided. - # ensure that the others have appropriate empty values. 
- - if job_id and not queue and not job_set_id: - request = submit_pb2.JobCancelRequest(job_id=job_id) - - elif job_set_id and queue and not job_id: - request = submit_pb2.JobCancelRequest(queue=queue, job_set_id=job_set_id) - + if job_id and queue and job_set_id: + request = submit_pb2.JobCancelRequest( + queue=queue, job_set_id=job_set_id, job_id=job_id + ) + return self.submit_stub.CancelJobs(request) else: - raise ValueError("Either job_id or job_set_id and queue must be provided.") - - response = self.submit_stub.CancelJobs(request) - return response + logger.warning( + "cancelling all jobs within a jobset via cancel_jobs is deprecated. " + "Use cancel_jobset instead." + ) + return self.cancel_jobset(queue, job_set_id, []) # type: ignore def cancel_jobset( self, @@ -182,14 +281,14 @@ def cancel_jobset( def reprioritize_jobs( self, new_priority: float, - job_ids: Optional[List[str]] = None, - job_set_id: Optional[str] = None, - queue: Optional[str] = None, + job_ids: Optional[List[str]], + job_set_id: str, + queue: str, ) -> submit_pb2.JobReprioritizeResponse: """Reprioritize jobs with new_priority value. Uses ReprioritizeJobs RPC to set a new priority on a list of jobs - or job set. + or job set (if job_ids are set to None or empty). :param new_priority: The new priority value for the jobs :param job_ids: A list of job ids to change priority of @@ -197,28 +296,48 @@ def reprioritize_jobs( :param queue: The queue the jobs are in :return: JobReprioritizeResponse object. It is a map of strings. """ + if not queue or not job_set_id: + raise ValueError("Both queue and job_set_id must be provided.") - # Same as in cancel_jobs, ensure that either - # job_ids or job_set_id and queue is provided. - - if job_ids and not job_set_id and not queue: + if job_ids: request = submit_pb2.JobReprioritizeRequest( + queue=queue, + job_set_id=job_set_id, job_ids=job_ids, new_priority=new_priority, ) - elif job_set_id and queue and not job_ids: + else: request = submit_pb2.JobReprioritizeRequest( - job_set_id=job_set_id, queue=queue, + job_set_id=job_set_id, new_priority=new_priority, ) - else: - raise ValueError("Either job_ids or job_set_id and queue must be provided.") + return self.submit_stub.ReprioritizeJobs(request) - response = self.submit_stub.ReprioritizeJobs(request) - return response + def preempt_jobs( + self, + queue: str, + job_set_id: str, + job_id: str, + ) -> empty_pb2.Empty: + """Preempt jobs in a given queue. + + Uses the PreemptJobs RPC to preempt jobs. + + :param queue: The name of the queue + :param job_set_id: The name of the job set id + :param job_id: The id the job + :return: An empty response. 
+ """ + if not queue or not job_set_id or not job_id: + raise ValueError("All of queue, job_set_id and job_id must be provided.") + + request = submit_pb2.JobPreemptRequest( + queue=queue, job_set_id=job_set_id, job_ids=[job_id] + ) + return self.submit_stub.PreemptJobs(request) def create_queue(self, queue: submit_pb2.Queue) -> empty_pb2.Empty: """ diff --git a/client/python/armada_client/iterators.py b/client/python/armada_client/iterators.py new file mode 100644 index 00000000000..65e09cf949a --- /dev/null +++ b/client/python/armada_client/iterators.py @@ -0,0 +1,194 @@ +# Internal iterators with timeout, to detect client disconnection +from __future__ import annotations + +import datetime +import queue +import asyncio +import threading +from typing import ( + Any, + AsyncIterator, + Iterator, + Optional, + Type, + TypeVar, + Generic, +) + + +class IteratorTimeoutException(Exception): + def __init__(self) -> None: + super().__init__("Timeout while waiting for next element in iterator") + + +T = TypeVar("T") + + +def _utcnow(): + # It's a method, so we can mock out time, if we need to. + return datetime.datetime.now(datetime.timezone.utc) + + +class _PollerMixIn(Generic[T]): + def __init__( + self, + timeout: datetime.timedelta, + poll_interval: datetime.timedelta, + stop_termination_exception: Type[Exception], + ) -> None: + self._timeout = timeout + self._poll_interval = poll_interval + self._stop_termination_exception = stop_termination_exception + self._done = False + + def stop(self): + """ + Mark underlying thread for termination (on next result being yielded). + Immediately stop consuming the iterator. + """ + self._done = True + + def _should_keep_polling( + self, started_at: datetime.datetime, data: Optional[Any] + ) -> bool: + """ + Should iterator keep polling for new data. + """ + timed_out = _utcnow() - started_at > self._timeout + should_terminate = data is not None or self._done or timed_out + return not should_terminate + + def _handle_result(self, data: Optional[T]) -> T: + """ + Propagate result from underlying iterator, raise exceptions if any. + """ + + # propagate any exceptions including StopIteration + if isinstance(data, BaseException): + self._done = True + raise data + # raise timeout exception if no data was received + if data is None and not self._done: + raise IteratorTimeoutException() + + if data is None: + raise self._stop_termination_exception() + + return data + + +class TimeoutIterator(Generic[T], _PollerMixIn[T], Iterator[T]): + """ + INTERNAL (no compatibility guarantees between library releases). + + Wraps an iterator with timeout functionality. + Raises IteratorTimeoutException if timeout. + Uses background thread, to implement timeout functionality. + + :param iterator: iterator to wrap + :param timeout: timeout duration + :param poll_interval: interval to poll for new data + """ + + def __init__( + self, + iterator: Iterator[T], + timeout: datetime.timedelta, + poll_interval: datetime.timedelta = datetime.timedelta(seconds=1), + ): + _PollerMixIn.__init__(self, timeout, poll_interval, StopIteration) + self._iterator = iterator + self._buffer = queue.Queue() + self._thread = threading.Thread(target=self.__consume, daemon=True) + self._thread.start() + + def __iter__(self) -> TimeoutIterator[T]: + return self + + def __next__(self) -> T: + """ + Yields result from underlying iterator. + Raises IteratorTimeoutException if timeout. 
+ """ + now = _utcnow() + data = None + while self._should_keep_polling(now, data): + try: + data = self._buffer.get(timeout=self._poll_interval.total_seconds()) + except queue.Empty: + pass + + return self._handle_result(data) + + def __consume(self): + """ + Consume from underlying iterator and put data + (including any exceptions) into buffer. + """ + try: + for data in self._iterator: + self._buffer.put(data) + if self._done: + break + self._buffer.put(self._stop_termination_exception()) + except BaseException as e: + self._buffer.put(e) + + +class AsyncTimeoutIterator(Generic[T], _PollerMixIn, AsyncIterator[T]): + """ + INTERNAL (no compatibility guarantees between library releases). + + Wraps an async iterator with timeout functionality. + Raises IteratorTimeoutException if timeout. + + :param iterator: iterator to wrap + :param timeout: timeout duration + :param poll_interval: interval to poll for new data + """ + + def __init__( + self, + iterator: AsyncIterator[T], + timeout: datetime.timedelta, + poll_interval: datetime.timedelta = datetime.timedelta(seconds=1), + ): + _PollerMixIn.__init__(self, timeout, poll_interval, StopAsyncIteration) + self._iterator = iterator + self._buffer = asyncio.Queue() + self._task = asyncio.get_event_loop().create_task(self.__consume()) + + def __aiter__(self): + return self + + async def __anext__(self): + """ + Yields result from underlying iterator. + Raises IteratorTimeoutException if timeout. + """ + + now = _utcnow() + data = None + while self._should_keep_polling(now, data): + try: + data = await asyncio.wait_for( + self._buffer.get(), self._timeout.total_seconds() + ) + except asyncio.TimeoutError: + pass + + return self._handle_result(data) + + async def __consume(self): + """ + Consume from underlying iterator and put data + (including any exceptions) into buffer. + """ + try: + async for data in self._iterator: + await self._buffer.put(data) + if self._done: + break + await self._buffer.put(self._stop_termination_exception()) + except BaseException as e: + await self._buffer.put(e) diff --git a/client/python/examples/preempting.py b/client/python/examples/preempting.py new file mode 100644 index 00000000000..6d3ff5a3873 --- /dev/null +++ b/client/python/examples/preempting.py @@ -0,0 +1,127 @@ +""" +A full example of preempting jobs +""" + +import os +import uuid + +import grpc +from armada_client.client import ArmadaClient +from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1 +from armada_client.k8s.io.apimachinery.pkg.api.resource import ( + generated_pb2 as api_resource, +) + + +def create_dummy_job(client: ArmadaClient): + """ + Create a dummy job with a single container. 
+ """ + + # For infomation on where this comes from, + # see https://github.com/kubernetes/api/blob/master/core/v1/generated.proto + pod = core_v1.PodSpec( + containers=[ + core_v1.Container( + name="container1", + image="index.docker.io/library/ubuntu:latest", + args=["sleep", "10s"], + securityContext=core_v1.SecurityContext(runAsUser=1000), + resources=core_v1.ResourceRequirements( + requests={ + "cpu": api_resource.Quantity(string="120m"), + "memory": api_resource.Quantity(string="510Mi"), + }, + limits={ + "cpu": api_resource.Quantity(string="120m"), + "memory": api_resource.Quantity(string="510Mi"), + }, + ), + ) + ], + ) + + return [ + client.create_job_request_item(priority=1, namespace="default", pod_spec=pod) + ] + + +def preempting_jobs_example(client, queue): + """ + Submits and Preempts Job + """ + + # Create the PodSpec for the job + job_request_items1 = create_dummy_job(client) + + job_set_id1 = f"set-{uuid.uuid1()}" + + resp1 = client.submit_jobs( + queue=queue, job_set_id=job_set_id1, job_request_items=job_request_items1 + ) + + # Gets the job_id of the first job in job_request_items1 + # This job is preempted using the job_id + job_id = resp1.job_response_items[0].job_id + print(f"preempting {queue} {job_set_id1} {job_id}") + client.preempt_jobs(queue=queue, job_set_id=job_set_id1, job_id=job_id) + + +def quick_create_queue(client, queue): + """ + Creates a queue. + + Will skip if the queue already exists. + """ + + queue_req = client.create_queue_request(name=queue, priority_factor=1) + + # Make sure we handle the queue already existing + try: + client.create_queue(queue_req) + + # Handle the error we expect to maybe occur + except grpc.RpcError as e: + code = e.code() + if code == grpc.StatusCode.ALREADY_EXISTS: + print(f"Queue {queue} already exists") + client.update_queue(queue_req) + else: + raise e + + +def workflow(): + """ + Starts a workflow, which includes: + - Creating a queue and job + - Preempting the job with its job-id, queue and jobset-id + """ + + # The queue and job_set_id that will be used for all jobs + queue = "test-preempting" + + # Ensures that the correct channel type is generated + if DISABLE_SSL: + channel = grpc.insecure_channel(f"{HOST}:{PORT}") + else: + channel_credentials = grpc.ssl_channel_credentials() + channel = grpc.secure_channel( + f"{HOST}:{PORT}", + channel_credentials, + ) + + client = ArmadaClient(channel) + quick_create_queue(client, queue) + + preempting_jobs_example(client, queue) + + +if __name__ == "__main__": + # Note that the form of ARMADA_SERVER should be something like + # domain.com, localhost, or 0.0.0.0 + DISABLE_SSL = os.environ.get("DISABLE_SSL", False) + HOST = os.environ.get("ARMADA_SERVER", "localhost") + PORT = os.environ.get("ARMADA_PORT", "50051") + + workflow() + print("Completed Workflow") diff --git a/client/python/pyproject.toml b/client/python/pyproject.toml index f477f7bea9b..ab4cdc84a83 100644 --- a/client/python/pyproject.toml +++ b/client/python/pyproject.toml @@ -1,10 +1,10 @@ [project] name = "armada_client" -version = "0.2.9" +version = "0.3.4" description = "Armada gRPC API python client" readme = "README.md" requires-python = ">=3.7" -dependencies = ["grpcio>=1.46.3", "grpcio-tools>=1.46.3", "mypy-protobuf>=3.2.0", "protobuf>=3.20.3"] +dependencies = ["grpcio==1.58.0", "grpcio-tools==1.58.0", "mypy-protobuf>=3.2.0", "protobuf>=3.20,<5.0" ] license = { text = "Apache Software License" } authors = [{ name = "G-Research Open Source Software", email = "armada@armadaproject.io" }] diff --git 
a/client/python/tests/integration/test_no_auth.py b/client/python/tests/integration/test_no_auth.py index a970e952ab0..a3c30569856 100644 --- a/client/python/tests/integration/test_no_auth.py +++ b/client/python/tests/integration/test_no_auth.py @@ -105,6 +105,7 @@ def get_queue(): def create_queue(client: ArmadaClient, queue_name): queue = client.create_queue_request(name=queue_name, priority_factor=1) client.create_queue(queue) + time.sleep(10) wait_for(client, queue=queue_name) @@ -148,7 +149,11 @@ def test_submit_job_and_cancel_by_id(client: ArmadaClient, queue_name): wait_for(client, queue=queue_name, job_set_id=job_set_name) - cancelled_message = client.cancel_jobs(job_id=jobs.job_response_items[0].job_id) + cancelled_message = client.cancel_jobs( + queue=queue_name, + job_id=jobs.job_response_items[0].job_id, + job_set_id=job_set_name, + ) assert cancelled_message.cancelled_ids[0] == jobs.job_response_items[0].job_id @@ -165,7 +170,9 @@ def test_submit_job_and_cancel_by_job_id(client: ArmadaClient, queue_name): wait_for(client, queue=queue_name, job_set_id=job_set_name) - cancelled_message = client.cancel_jobs(job_id=job_id) + cancelled_message = client.cancel_jobs( + queue=queue_name, job_set_id=job_set_name, job_id=job_id + ) assert cancelled_message.cancelled_ids[0] == job_id diff --git a/client/python/tests/unit/server_mock.py b/client/python/tests/unit/server_mock.py index bbadc20964f..8d19101203b 100644 --- a/client/python/tests/unit/server_mock.py +++ b/client/python/tests/unit/server_mock.py @@ -1,11 +1,16 @@ from google.protobuf import empty_pb2 + from armada_client.armada import ( submit_pb2_grpc, submit_pb2, event_pb2, event_pb2_grpc, health_pb2, + job_pb2_grpc, + job_pb2, ) +from armada_client.armada.job_pb2 import JobRunState +from armada_client.armada.submit_pb2 import JobState class SubmitService(submit_pb2_grpc.SubmitServicer): @@ -39,6 +44,9 @@ def CancelJobs(self, request, context): def CancelJobSet(self, request, context): return empty_pb2.Empty() + def PreemptJobs(self, request, context): + return empty_pb2.Empty() + def ReprioritizeJobs(self, request, context): new_priority = request.new_priority if len(request.job_ids) > 0: @@ -98,3 +106,46 @@ def Health(self, request, context): return health_pb2.HealthCheckResponse( status=health_pb2.HealthCheckResponse.SERVING ) + + +class QueryAPIService(job_pb2_grpc.JobsServicer): + DEFAULT_JOB_DETAILS = { + "queue": "test_queue", + "jobset": "test_jobset", + "namespace": "test_namespace", + "state": JobState.RUNNING, + "cancel_reason": "", + "latest_run_id": "0", + } + + DEFAULT_JOB_RUN_DETAILS = { + "job_id": "0", + "cluster": "test_cluster", + "node": "test_node", + "state": JobRunState.RUN_STATE_RUNNING, + } + + def GetJobStatus(self, request, context): + return job_pb2.JobStatusResponse( + job_states={job: JobState.RUNNING for job in request.job_ids} + ) + + def GetJobDetails(self, request, context): + return job_pb2.JobDetailsResponse( + job_details={ + job: job_pb2.JobDetails( + job_id=job, **QueryAPIService.DEFAULT_JOB_DETAILS + ) + for job in request.job_ids + } + ) + + def GetJobRunDetails(self, request, context): + return job_pb2.JobRunDetailsResponse( + job_run_details={ + run: job_pb2.JobRunDetails( + run_id=run, **QueryAPIService.DEFAULT_JOB_RUN_DETAILS + ) + for run in request.run_ids + } + ) diff --git a/client/python/tests/unit/test_asyncio_client.py b/client/python/tests/unit/test_asyncio_client.py index 1567b4f6100..6f4d8709c23 100644 --- a/client/python/tests/unit/test_asyncio_client.py +++ 
b/client/python/tests/unit/test_asyncio_client.py @@ -4,9 +4,17 @@ import pytest import pytest_asyncio -from server_mock import EventService, SubmitService - -from armada_client.armada import event_pb2_grpc, submit_pb2_grpc, submit_pb2, health_pb2 +from armada_client.typings import JobState +from armada_client.armada.job_pb2 import JobRunState +from server_mock import EventService, SubmitService, QueryAPIService + +from armada_client.armada import ( + event_pb2_grpc, + submit_pb2_grpc, + submit_pb2, + health_pb2, + job_pb2_grpc, +) from armada_client.asyncio_client import ArmadaAsyncIOClient from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1 from armada_client.k8s.io.apimachinery.pkg.api.resource import ( @@ -14,7 +22,6 @@ ) from armada_client.permissions import Permissions, Subject -from armada_client.typings import JobState @pytest.fixture @@ -22,6 +29,7 @@ def server_mock(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) submit_pb2_grpc.add_SubmitServicer_to_server(SubmitService(), server) event_pb2_grpc.add_EventServicer_to_server(EventService(), server) + job_pb2_grpc.add_JobsServicer_to_server(QueryAPIService(), server) server.add_insecure_port("[::]:50051") server.start() yield @@ -177,21 +185,11 @@ async def test_cancel_jobs(aio_client): await test_create_queue(aio_client) await test_submit_job(aio_client) - # Test that the right combination of jobid or jobsetid and queue is used - # also check that the Value error is raised - with pytest.raises(ValueError): - await aio_client.cancel_jobs( - queue="test", job_id="job-1", job_set_id="job-set-1" - ) - - resp = await aio_client.cancel_jobs(job_id="job-1") - + resp = await aio_client.cancel_jobs( + queue="test", job_id="job-1", job_set_id="job-set-1" + ) assert resp.cancelled_ids[0] == "job-1" - resp = await aio_client.cancel_jobs(queue="test", job_set_id="job-set-1") - - assert len(list(resp.cancelled_ids)) > 0 - @pytest.mark.asyncio async def test_cancel_jobset(aio_client): @@ -204,6 +202,13 @@ async def test_cancel_jobset(aio_client): ) +@pytest.mark.asyncio +async def test_preempt_jobs(aio_client): + await test_create_queue(aio_client) + await test_submit_job(aio_client) + await aio_client.preempt_jobs(queue="test", job_id="job-1", job_set_id="job-set-1") + + @pytest.mark.asyncio async def test_update_queue(aio_client): queue = aio_client.create_queue_request(name="test", priority_factor=1) @@ -266,20 +271,10 @@ async def test_update_queues_full(aio_client): @pytest.mark.asyncio async def test_reprioritize_jobs(aio_client): - # Similar to test_cancel_jobs(), test that the right combination of jobid - # or jobsetid and queue is used - # also check that the Value error is raised - - with pytest.raises(ValueError): - await aio_client.reprioritize_jobs( - queue="test", - job_ids=["job-1"], - job_set_id="job-set-1", - new_priority=1, - ) - resp = await aio_client.reprioritize_jobs( + queue="test", job_ids=["job-1"], + job_set_id="job-set-1", new_priority=1, ) @@ -287,6 +282,7 @@ async def test_reprioritize_jobs(aio_client): resp = await aio_client.reprioritize_jobs( queue="test", + job_ids=None, job_set_id="job-set-1", new_priority=1, ) @@ -314,3 +310,36 @@ async def test_health_submit(aio_client): async def test_health_event(aio_client): health = await aio_client.event_health() assert health.SERVING == health_pb2.HealthCheckResponse.SERVING + + +@pytest.mark.asyncio +async def test_job_status(aio_client): + await test_create_queue(aio_client) + await test_submit_job(aio_client) + + 
job_status_response = await aio_client.get_job_status(["job-1"]) + assert job_status_response.job_states["job-1"] == submit_pb2.JobState.RUNNING + + +@pytest.mark.asyncio +async def test_job_details(aio_client): + await test_create_queue(aio_client) + await test_submit_job(aio_client) + + job_details_response = await aio_client.get_job_details(["job-1"]) + job_details = job_details_response.job_details + assert job_details["job-1"].state == submit_pb2.JobState.RUNNING + assert job_details["job-1"].job_id == "job-1" + assert job_details["job-1"].queue == "test_queue" + + +@pytest.mark.asyncio +async def test_job_run_details(aio_client): + await test_create_queue(aio_client) + await test_submit_job(aio_client) + + run_details_response = await aio_client.get_job_run_details(["run-1"]) + run_details = run_details_response.job_run_details + assert run_details["run-1"].state == JobRunState.RUN_STATE_RUNNING + assert run_details["run-1"].run_id == "run-1" + assert run_details["run-1"].cluster == "test_cluster" diff --git a/client/python/tests/unit/test_client.py b/client/python/tests/unit/test_client.py index c012d4a38f9..70eba72439b 100644 --- a/client/python/tests/unit/test_client.py +++ b/client/python/tests/unit/test_client.py @@ -3,9 +3,17 @@ import grpc import pytest -from server_mock import EventService, SubmitService - -from armada_client.armada import event_pb2_grpc, submit_pb2_grpc, submit_pb2, health_pb2 +from armada_client.typings import JobState +from armada_client.armada.job_pb2 import JobRunState +from server_mock import EventService, SubmitService, QueryAPIService + +from armada_client.armada import ( + event_pb2_grpc, + submit_pb2_grpc, + submit_pb2, + health_pb2, + job_pb2_grpc, +) from armada_client.client import ArmadaClient from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1 from armada_client.k8s.io.apimachinery.pkg.api.resource import ( @@ -13,7 +21,6 @@ ) from armada_client.permissions import Permissions, Subject -from armada_client.typings import JobState @pytest.fixture(scope="session", autouse=True) @@ -21,6 +28,7 @@ def server_mock(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) submit_pb2_grpc.add_SubmitServicer_to_server(SubmitService(), server) event_pb2_grpc.add_EventServicer_to_server(EventService(), server) + job_pb2_grpc.add_JobsServicer_to_server(QueryAPIService(), server) server.add_insecure_port("[::]:50051") server.start() @@ -161,22 +169,20 @@ def test_delete_queue(): tester.delete_queue("test") -def test_cancel_jobs(): +def test_preempt_jobs(): test_create_queue() test_submit_job() - # Test that the right combination of jobid or jobsetid and queue is used - # also check that the Value error is raised - with pytest.raises(ValueError): - tester.cancel_jobs(queue="test", job_id="job-1", job_set_id="job-set-1") + tester.preempt_jobs(queue="test", job_id="job-1", job_set_id="job-set-1") - resp = tester.cancel_jobs(job_id="job-1") - assert resp.cancelled_ids[0] == "job-1" +def test_cancel_jobs(): + test_create_queue() + test_submit_job() - resp = tester.cancel_jobs(queue="test", job_set_id="job-set-1") + resp = tester.cancel_jobs(queue="test", job_id="job-1", job_set_id="job-set-1") - assert len(list(resp.cancelled_ids)) > 0 + assert resp.cancelled_ids[0] == "job-1" def test_cancel_jobset(): @@ -246,20 +252,10 @@ def test_update_queues_full(): def test_reprioritize_jobs(): - # Similar to test_cancel_jobs(), test that the right combination of jobid - # or jobsetid and queue is used - # also check that the Value error is 
raised - - with pytest.raises(ValueError): - tester.reprioritize_jobs( - queue="test", - job_ids=["job-1"], - job_set_id="job-set-1", - new_priority=1, - ) - resp = tester.reprioritize_jobs( + queue="test", job_ids=["job-1"], + job_set_id="job-set-1", new_priority=1, ) @@ -267,6 +263,7 @@ def test_reprioritize_jobs(): resp = tester.reprioritize_jobs( queue="test", + job_ids=None, job_set_id="job-set-1", new_priority=1, ) @@ -289,3 +286,31 @@ def test_health_submit(): def test_health_event(): health = tester.event_health() assert health.SERVING == health_pb2.HealthCheckResponse.SERVING + + +def test_job_status(): + test_create_queue() + test_submit_job() + + job_status_response = tester.get_job_status(["job-1"]) + assert job_status_response.job_states["job-1"] == submit_pb2.JobState.RUNNING + + +def test_job_details(): + test_create_queue() + test_submit_job() + + job_details = tester.get_job_details(["job-1"]).job_details + assert job_details["job-1"].state == submit_pb2.JobState.RUNNING + assert job_details["job-1"].job_id == "job-1" + assert job_details["job-1"].queue == "test_queue" + + +def test_job_run_details(): + test_create_queue() + test_submit_job() + + run_details = tester.get_job_run_details(["run-1"]).job_run_details + assert run_details["run-1"].state == JobRunState.RUN_STATE_RUNNING + assert run_details["run-1"].run_id == "run-1" + assert run_details["run-1"].cluster == "test_cluster" diff --git a/client/python/tests/unit/test_event.py b/client/python/tests/unit/test_event.py index ed0402f46e3..38069abf7b9 100644 --- a/client/python/tests/unit/test_event.py +++ b/client/python/tests/unit/test_event.py @@ -32,7 +32,6 @@ def __init__(self, name): [ ("submitted", EventType.submitted), ("queued", EventType.queued), - ("duplicate_found", EventType.duplicate_found), ("leased", EventType.leased), ("lease_returned", EventType.lease_returned), ("lease_expired", EventType.lease_expired), @@ -48,8 +47,6 @@ def __init__(self, name): ("utilisation", EventType.utilisation), ("ingress_info", EventType.ingress_info), ("reprioritizing", EventType.reprioritizing), - ("updated", EventType.updated), - ("failedCompressed", EventType.failedCompressed), ], ) def test_event_class(name, event_type): @@ -68,7 +65,6 @@ def test_event_class(name, event_type): [ "submitted", "queued", - "duplicate_found", "leased", "lease_returned", "lease_expired", @@ -84,8 +80,6 @@ def test_event_class(name, event_type): "utilisation", "ingress_info", "reprioritizing", - "updated", - "failedCompressed", ], ) def test_unmarshal_event_response(name): diff --git a/client/python/tests/unit/test_iterators.py b/client/python/tests/unit/test_iterators.py new file mode 100644 index 00000000000..c9d7df63e4c --- /dev/null +++ b/client/python/tests/unit/test_iterators.py @@ -0,0 +1,111 @@ +import asyncio +import datetime +import time +import pytest + +from armada_client.iterators import ( + AsyncTimeoutIterator, + IteratorTimeoutException, + TimeoutIterator, +) + + +def test_timeout_iterator_propagates_results(): + iterator = iter([1, 2, 3]) + timeout_iterator = TimeoutIterator[int]( + iterator, timeout=datetime.timedelta(seconds=1) + ) + assert list(timeout_iterator) == [1, 2, 3] + + +def test_timeout_iterator_propagates_underlying_error(): + def error_generator(): + yield 1 + raise ValueError("Error") + + timeout_iterator = TimeoutIterator[int]( + error_generator(), timeout=datetime.timedelta(seconds=1) + ) + + with pytest.raises(ValueError): + list(timeout_iterator) + + +def test_timeout_iterator_raises_timeout_error(): + def 
timeout_generator(): + yield 1 + time.sleep(60) + + timeout_iterator = TimeoutIterator[int]( + timeout_generator(), timeout=datetime.timedelta(milliseconds=1) + ) + + assert next(timeout_iterator) == 1 + with pytest.raises(IteratorTimeoutException): + next(timeout_iterator) + + +def test_timeout_iterator_stop_stops_iteration_without_error(): + def generator(): + yield 1 + + timeout_iterator = TimeoutIterator[int]( + generator(), + timeout=datetime.timedelta(seconds=1), + ) + timeout_iterator.stop() + + assert list(timeout_iterator) == [] + + +async def test_async_timeout_iterator_propagates_results(): + data = [1, 2, 3] + + async def aiter(): + for item in data: + yield item + + timeout_iterator = AsyncTimeoutIterator[int]( + aiter(), timeout=datetime.timedelta(seconds=1) + ) + assert [i async for i in timeout_iterator] == [1, 2, 3] + + +async def test_async_timeout_iterator_propagates_underlying_error(): + async def error_generator(): + yield 1 + raise ValueError("Error") + + timeout_iterator = AsyncTimeoutIterator[int]( + error_generator(), timeout=datetime.timedelta(seconds=1) + ) + + with pytest.raises(ValueError): + [i async for i in timeout_iterator] + + +async def test_async_timeout_iterator_raises_timeout_error(): + async def timeout_generator(): + yield 1 + await asyncio.sleep(60) + + timeout_iterator = AsyncTimeoutIterator[int]( + timeout_generator(), timeout=datetime.timedelta(milliseconds=1) + ) + + assert await timeout_iterator.__anext__() == 1 + with pytest.raises(IteratorTimeoutException): + await timeout_iterator.__anext__() + + +async def test_async_timeout_iterator_stop_stops_iteration_without_error(): + async def generator(): + yield 1 + + timeout_iterator = AsyncTimeoutIterator[int]( + generator(), + timeout=datetime.timedelta(seconds=1), + ) + timeout_iterator.stop() + + assert [i async for i in timeout_iterator] == [] diff --git a/cmd/armadactl/README.md b/cmd/armadactl/README.md deleted file mode 100644 index 42174649619..00000000000 --- a/cmd/armadactl/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Armadactl - -Armadactl is a command-line tool used for managing jobs in the Armada workload orchestration system. It provides functionality for creating, updating, and deleting jobs, as well as monitoring job status and resource usage. - -## Usage -Once Armadactl is successfully installed, you can use it to execute Armada subcommands by running the following command: -```bash -armadactl [subcommand] [flags] -``` - -### Here are the available subcommands: -- analyze : The analyze subcommand can be used to analyze a set of manifests and provide a report on their compatibility with a given Kubernetes cluster version. -```bash -armadactl analyze [path/to/manifests] [flags] -``` -- cancel : The cancel subcommand can be used to cancel a running Armada deployment. -```bash -armadactl cancel [deployment_name] [flags] -``` -- create : The create subcommand can be used to create a new Armada deployment. -```bash -armadactl create [path/to/manifests] [flags] -``` -- delete : The delete subcommand can be used to delete an existing Armada deployment. -```bash -armadactl delete [deployment_name] [flags] -``` -- update : The update subcommand can be used to update an existing Armada deployment. -```bash -armadactl update [deployment_name] [path/to/new_manifests] [flags] -``` -- describe : The describe subcommand can be used to get detailed information about an existing Armada deployment. 
-```bash -armadactl describe [deployment_name] [flags] -``` -- kube : The kube subcommand can be used to generate a Kubernetes kubeconfig file for a specific deployment. -```bash -armadactl kube [deployment_name] [flags] -``` -- reprioritize : The reprioritize subcommand can be used to change the priority of a running Armada deployment. -```bash -armadactl reprioritize [deployment_name] [new_priority] [flags] -``` -- resources : The resources subcommand can be used to get information about the resources used by an Armada deployment. -```bash -armadactl resources [deployment_name] [flags] -``` -- submit : The submit subcommand can be used to submit a set of manifests to an existing Armada deployment. -```bash -armadactl submit [deployment_name] [path/to/new_manifests] [flags] -``` -- version : The version subcommand can be used to get the version of Armada that is currently installed. -```bash -armadactl version [flags] -``` -- watch : The watch subcommand can be used to watch the status of an Armada deployment. -```bash -armadactl watch [deployment_name] [flags] -``` -- queue-report : This subcommand retrieves a report of the current scheduling status of all queues in the Armada cluster. -```bash -armadactl queue-report -``` -- job-report : This subcommand retrieves a report of the current scheduling status of all jobs in the Armada cluster. -```bash -armadactl job-report -``` - -- scheduling-report : This subcommand retrieves a report of the current scheduling status in the Armada cluster. -```bash -armadactl scheduling-report -``` - -For a full list of subcommands and options, you can run **armadactl --help**. diff --git a/cmd/armadactl/cmd/analyze.go b/cmd/armadactl/cmd/analyze.go deleted file mode 100644 index edba2942591..00000000000 --- a/cmd/armadactl/cmd/analyze.go +++ /dev/null @@ -1,26 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/armadaproject/armada/internal/armadactl" -) - -func analyzeCmd() *cobra.Command { - a := armadactl.New() - cmd := &cobra.Command{ - Use: "analyze ", - Aliases: []string{"gen"}, - Short: "Analyze job events in job set.", - Args: cobra.ExactArgs(2), - PreRunE: func(cmd *cobra.Command, args []string) error { - return initParams(cmd, a.Params) - }, - RunE: func(cmd *cobra.Command, args []string) error { - queue := args[0] - jobSetId := args[1] - return a.Analyze(queue, jobSetId) - }, - } - return cmd -} diff --git a/cmd/armadactl/cmd/cancel.go b/cmd/armadactl/cmd/cancel.go index e3b13a5abc2..80bff893c96 100644 --- a/cmd/armadactl/cmd/cancel.go +++ b/cmd/armadactl/cmd/cancel.go @@ -7,24 +7,54 @@ import ( ) func cancelCmd() *cobra.Command { - a := armadactl.New() cmd := &cobra.Command{ Use: "cancel", Short: "Cancels jobs in armada.", - Long: `Cancels jobs either by jobId or by combination of queue & job set.`, + Long: `Cancels jobs individually using job ID or in bulk as part of a job set.`, Args: cobra.ExactArgs(0), + } + cmd.AddCommand( + cancelJobCmd(), + cancelJobSetCmd(), + ) + return cmd +} + +func cancelJobCmd() *cobra.Command { + a := armadactl.New() + cmd := &cobra.Command{ + Use: "job ", + Short: "Cancels job in armada.", + Long: `Cancel job by providing queue, job-set and job-id.`, + Args: cobra.ExactArgs(3), + PreRunE: func(cmd *cobra.Command, args []string) error { + return initParams(cmd, a.Params) + }, + RunE: func(cmd *cobra.Command, args []string) error { + queue := args[0] + jobSetId := args[1] + jobId := args[2] + return a.CancelJob(queue, jobSetId, jobId) + }, + } + return cmd +} + +func cancelJobSetCmd() 
*cobra.Command { + a := armadactl.New() + cmd := &cobra.Command{ + Use: "job-set ", + Short: "Cancels job-set in armada.", + Long: `Cancels job-set by providing queue, job-set.`, + Args: cobra.ExactArgs(2), PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) }, RunE: func(cmd *cobra.Command, args []string) error { - jobId, _ := cmd.Flags().GetString("jobId") - queue, _ := cmd.Flags().GetString("queue") - jobSetId, _ := cmd.Flags().GetString("jobSet") - return a.Cancel(queue, jobSetId, jobId) + queue := args[0] + jobSetId := args[1] + return a.CancelJobSet(queue, jobSetId) }, } - cmd.Flags().String("jobId", "", "job to cancel") - cmd.Flags().String("queue", "", "queue to cancel jobs from (requires job set to be specified)") - cmd.Flags().String("jobSet", "", "jobSet to cancel (requires queue to be specified)") return cmd } diff --git a/cmd/armadactl/cmd/cancel_test.go b/cmd/armadactl/cmd/cancel_test.go index f5344d68829..adac915f1d3 100644 --- a/cmd/armadactl/cmd/cancel_test.go +++ b/cmd/armadactl/cmd/cancel_test.go @@ -18,9 +18,9 @@ func TestCancel(t *testing.T) { jobSet string }{ "default flags": {nil, "", "", ""}, - "valid jobId": {[]flag{{"jobId", "jobId1"}}, "jobId1", "", ""}, + "valid job-id": {[]flag{{"job-id", "jobId1"}}, "jobId1", "", ""}, "valid queue": {[]flag{{"queue", "queue1,jobSet1"}}, "", "queue1", "jobSet1"}, - "valid jobSet": {[]flag{{"jobSet", "jobSet1"}}, "", "", "jobSet1"}, + "valid job-set": {[]flag{{"job-set", "jobSet1"}}, "", "", "jobSet1"}, } for name, test := range tests { t.Run(name, func(t *testing.T) { @@ -31,20 +31,20 @@ func TestCancel(t *testing.T) { a.Out = io.Discard if len(test.jobId) > 0 { - jobIdFlag, err1 := cmd.Flags().GetString("jobId") + jobIdFlag, err1 := cmd.Flags().GetString("job-id") require.Error(t, err1) require.Equal(t, test.jobId, jobIdFlag) } if len(test.queue) > 0 { queueFlag, err1 := cmd.Flags().GetString("queue") - jobSetFlag, err2 := cmd.Flags().GetString("jobSet") + jobSetFlag, err2 := cmd.Flags().GetString("job-set") require.Error(t, err1) require.Error(t, err2) require.Equal(t, test.queue, queueFlag) require.Equal(t, test.jobSet, jobSetFlag) } if len(test.jobSet) > 0 { - jobSetFlag, err1 := cmd.Flags().GetString("jobSet") + jobSetFlag, err1 := cmd.Flags().GetString("job-set") require.Error(t, err1) require.Equal(t, test.jobSet, jobSetFlag) } diff --git a/cmd/armadactl/cmd/commands.go b/cmd/armadactl/cmd/commands.go index 619ea15f624..f8fab0125f4 100644 --- a/cmd/armadactl/cmd/commands.go +++ b/cmd/armadactl/cmd/commands.go @@ -9,7 +9,8 @@ import ( func createCmd(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ Use: "create", - Short: "Create Armada resource. Supported: queue", + Short: "Create Armada resource", + Long: "Create Armada resource. Supported: queue", PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) }, @@ -32,7 +33,8 @@ func createCmd(a *armadactl.App) *cobra.Command { func deleteCmd() *cobra.Command { cmd := &cobra.Command{ Use: "delete", - Short: "Delete Armada resource. Supported: queue", + Short: "Delete Armada resource", + Long: "Delete Armada resource. Supported: queue", } cmd.AddCommand(queueDeleteCmd()) return cmd @@ -41,7 +43,8 @@ func deleteCmd() *cobra.Command { func updateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "update", - Short: "Update Armada resource. Supported: queue", + Short: "Update Armada resource", + Long: "Update Armada resource. 
Supported: queue", } cmd.AddCommand(queueUpdateCmd()) return cmd @@ -50,8 +53,15 @@ func updateCmd() *cobra.Command { func getCmd() *cobra.Command { cmd := &cobra.Command{ Use: "get", - Short: "Retrieve information about armada resource. Supported: queue", + Short: "Retrieve information about armada resource", + Long: "Retrieve information about armada resource. Supported: queue, scheduling-report, queue-report, job-report", } - cmd.AddCommand(queueGetCmd()) + cmd.AddCommand( + queueGetCmd(), + getSchedulingReportCmd(armadactl.New()), + getQueueSchedulingReportCmd(armadactl.New()), + getJobSchedulingReportCmd(armadactl.New()), + ) + return cmd } diff --git a/cmd/armadactl/cmd/config.go b/cmd/armadactl/cmd/config.go index 6049b5ddc04..e3bd2cd7f02 100644 --- a/cmd/armadactl/cmd/config.go +++ b/cmd/armadactl/cmd/config.go @@ -4,20 +4,21 @@ import ( "fmt" "strings" - "github.com/armadaproject/armada/internal/armadactl" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/pkg/client" - "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" "github.com/spf13/viper" "golang.org/x/exp/slices" + + "github.com/armadaproject/armada/internal/armadactl" + armadaslices "github.com/armadaproject/armada/internal/common/slices" + "github.com/armadaproject/armada/pkg/client" ) func configCmd(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ Use: "config", - Short: "Operations on armadactl config. Supported: use-context, get-contexts, current-context", + Short: "Operations on armadactl config", + Long: "Operations on armadactl config. Supported: use-context, get-contexts, current-context", } cmd.AddCommand(getContextsCmd(a)) @@ -76,7 +77,7 @@ func getContextsCmd(a *armadactl.App) *cobra.Command { }, RunE: func(cmd *cobra.Command, args []string) error { currentContext := viper.GetString("currentContext") - allContexts := util.Map(client.ExtractConfigurationContexts(), func(context string) string { + allContexts := armadaslices.Map(client.ExtractConfigurationContexts(), func(context string) string { if context == currentContext { return fmt.Sprintf("%s (current)", context) } diff --git a/cmd/armadactl/cmd/docs.go b/cmd/armadactl/cmd/docs.go new file mode 100644 index 00000000000..d0c58a9ae9e --- /dev/null +++ b/cmd/armadactl/cmd/docs.go @@ -0,0 +1,52 @@ +package cmd + +import ( + "embed" + "fmt" + "io/fs" + + "github.com/charmbracelet/glamour" + "github.com/spf13/cobra" +) + +//go:embed resources/README.md +var ArmadactlReadMe embed.FS + +//go:embed resources/glamourStyle.json +var GlamourStyle embed.FS + +func docsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "docs", + Short: "Prints comprehensive Armadactl documentation", + RunE: func(cmd *cobra.Command, args []string) error { + readmeContent, err := fs.ReadFile(ArmadactlReadMe, "resources/README.md") + if err != nil { + return fmt.Errorf("could not read content from documentation file: %s", err) + } + + glamourStyleContent, err := fs.ReadFile(GlamourStyle, "resources/glamourStyle.json") + if err != nil { + return fmt.Errorf("could not read content from documentation file: %s", err) + } + + renderer, err := glamour.NewTermRenderer( + glamour.WithStylesFromJSONBytes(glamourStyleContent), + glamour.WithWordWrap(120), + ) + if err != nil { + return fmt.Errorf("could not create documentation renderer: %s", err) + } + + out, err := renderer.Render(string(readmeContent)) + if err != nil { + return fmt.Errorf("could not render content from documentation file: %s", err) + } + + fmt.Println(out) + return nil 
+ }, + } + + return cmd +} diff --git a/cmd/armadactl/cmd/kube.go b/cmd/armadactl/cmd/kube.go deleted file mode 100644 index 2b28eb3abe8..00000000000 --- a/cmd/armadactl/cmd/kube.go +++ /dev/null @@ -1,67 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/armadaproject/armada/internal/armadactl" -) - -func kubeCmd() *cobra.Command { - a := armadactl.New() - cmd := &cobra.Command{ - Use: "kube", - Short: "output kubectl command to access pod information", - Long: "This command can be used to query kubernetes pods for a particular job.", - Example: `armadactl kube logs --queue my-queue --jobSet my-set --jobId 123456 - -In bash, you can execute it directly like this: - $(armadactl kube logs --queue my-queue --jobSet my-set --jobId 123456) --tail=20`, - PreRunE: func(cmd *cobra.Command, args []string) error { - return initParams(cmd, a.Params) - }, - RunE: func(cmd *cobra.Command, args []string) error { - jobId, err := cmd.Flags().GetString("jobId") - if err != nil { - return fmt.Errorf("error reading jobId: %s", err) - } - - queueName, err := cmd.Flags().GetString("queue") - if err != nil { - return fmt.Errorf("error reading queueName: %s", err) - } - - jobSetId, err := cmd.Flags().GetString("jobSet") - if err != nil { - return fmt.Errorf("error reading jobSet: %s", err) - } - - podNumber, err := cmd.Flags().GetInt("podNumber") - if err != nil { - return fmt.Errorf("error reading podNumber: %s", err) - } - - return a.Kube(jobId, queueName, jobSetId, podNumber, args) - }, - } - cmd.Flags().String( - "jobId", "", "job to cancel") - if err := cmd.MarkFlagRequired("jobId"); err != nil { - panic(err) - } - cmd.Flags().String( - "queue", "", "queue of the job") - if err := cmd.MarkFlagRequired("queue"); err != nil { - panic(err) - } - cmd.Flags().String( - "jobSet", "", "jobSet of the job") - if err := cmd.MarkFlagRequired("jobSet"); err != nil { - panic(err) - } - cmd.Flags().Int( - "podNumber", 0, "[optional] for jobs with multiple pods, index of the pod") - cmd.FParseErrWhitelist.UnknownFlags = true - return cmd -} diff --git a/cmd/armadactl/cmd/preempt.go b/cmd/armadactl/cmd/preempt.go index e1dd0df811d..4458e905b0e 100644 --- a/cmd/armadactl/cmd/preempt.go +++ b/cmd/armadactl/cmd/preempt.go @@ -7,33 +7,31 @@ import ( ) func preemptCmd() *cobra.Command { - a := armadactl.New() cmd := &cobra.Command{ Use: "preempt", - Short: "Prempt jobs in armada.", - Long: `Preempt jobs by queue, jobset and jobId.`, + Short: "Preempt jobs in armada.", Args: cobra.ExactArgs(0), + } + cmd.AddCommand(preemptJobCmd()) + return cmd +} + +func preemptJobCmd() *cobra.Command { + a := armadactl.New() + cmd := &cobra.Command{ + Use: "job ", + Short: "Preempt an armada job.", + Long: `Preempt a job by providing it's queue, jobset and jobId.`, + Args: cobra.ExactArgs(3), PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) }, RunE: func(cmd *cobra.Command, args []string) error { - jobId, _ := cmd.Flags().GetString("jobId") - queue, _ := cmd.Flags().GetString("queue") - jobSetId, _ := cmd.Flags().GetString("jobSet") + queue := args[0] + jobSetId := args[1] + jobId := args[2] return a.Preempt(queue, jobSetId, jobId) }, } - cmd.Flags().String("jobId", "", "job to cancel") - if err := cmd.MarkFlagRequired("jobId"); err != nil { - panic(err) - } - cmd.Flags().String("queue", "", "queue of the job to be cancelled") - if err := cmd.MarkFlagRequired("queue"); err != nil { - panic(err) - } - cmd.Flags().String("jobSet", "", "jobSet of the job to be 
cancelled") - if err := cmd.MarkFlagRequired("jobSet"); err != nil { - panic(err) - } return cmd } diff --git a/cmd/armadactl/cmd/queue.go b/cmd/armadactl/cmd/queue.go index ea6db4b449f..1eed93778b9 100644 --- a/cmd/armadactl/cmd/queue.go +++ b/cmd/armadactl/cmd/queue.go @@ -17,7 +17,7 @@ func queueCreateCmd() *cobra.Command { // Takes a caller-supplied app struct; useful for testing. func queueCreateCmdWithApp(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ - Use: "queue ", + Use: "queue ", Short: "Create new queue", Long: `Every job submitted to armada needs to be associated with queue. @@ -29,11 +29,11 @@ Job priority is evaluated inside queue, queue has its own priority.`, RunE: func(cmd *cobra.Command, args []string) error { name := args[0] - // TODO cmd.Flags().GetFloat64("priorityFactor") returns (0, nil) for invalid input (e.g., "not_a_float") + // TODO cmd.Flags().GetFloat64("priority-factor") returns (0, nil) for invalid input (e.g., "not_a_float") // TODO the other Flags get methods also fail to return errors on invalid input - priorityFactor, err := cmd.Flags().GetFloat64("priorityFactor") + priorityFactor, err := cmd.Flags().GetFloat64("priority-factor") if err != nil { - return fmt.Errorf("error reading priorityFactor: %s", err) + return fmt.Errorf("error reading priority-factor: %s", err) } owners, err := cmd.Flags().GetStringSlice("owners") @@ -41,9 +41,9 @@ Job priority is evaluated inside queue, queue has its own priority.`, return fmt.Errorf("error reading owners: %s", err) } - groups, err := cmd.Flags().GetStringSlice("groupOwners") + groups, err := cmd.Flags().GetStringSlice("group-owners") if err != nil { - return fmt.Errorf("error reading groupOwners: %s", err) + return fmt.Errorf("error reading group-owners: %s", err) } queue, err := queue.NewQueue(&api.Queue{ @@ -59,9 +59,9 @@ Job priority is evaluated inside queue, queue has its own priority.`, return a.CreateQueue(queue) }, } - cmd.Flags().Float64("priorityFactor", 1, "Set queue priority factor - lower number makes queue more important, must be > 0.") + cmd.Flags().Float64("priority-factor", 1, "Set queue priority factor - lower number makes queue more important, must be > 0.") cmd.Flags().StringSlice("owners", []string{}, "Comma separated list of queue owners, defaults to current user.") - cmd.Flags().StringSlice("groupOwners", []string{}, "Comma separated list of queue group owners, defaults to empty list.") + cmd.Flags().StringSlice("group-owners", []string{}, "Comma separated list of queue group owners, defaults to empty list.") return cmd } @@ -72,7 +72,7 @@ func queueDeleteCmd() *cobra.Command { // Takes a caller-supplied app struct; useful for testing. func queueDeleteCmdWithApp(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ - Use: "queue ", + Use: "queue ", Short: "Delete existing queue", Long: "Deletes queue if it exists, the queue needs to be empty at the time of deletion.", Args: cobra.ExactArgs(1), @@ -94,9 +94,9 @@ func queueGetCmd() *cobra.Command { // Takes a caller-supplied app struct; useful for testing. 
func queueGetCmdWithApp(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ - Use: "queue ", - Short: "Gets Queue Information.", - Long: "Gets Queue Information", + Use: "queue ", + Short: "Gets queue information.", + Long: "Gets queue information", Args: cobra.ExactArgs(1), PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) @@ -116,7 +116,7 @@ func queueUpdateCmd() *cobra.Command { // Takes a caller-supplied app struct; useful for testing. func queueUpdateCmdWithApp(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ - Use: "queue ", + Use: "queue ", Short: "Update an existing queue", Long: "Update settings of an existing queue", Args: cobra.ExactArgs(1), @@ -126,9 +126,9 @@ func queueUpdateCmdWithApp(a *armadactl.App) *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { name := args[0] - priorityFactor, err := cmd.Flags().GetFloat64("priorityFactor") + priorityFactor, err := cmd.Flags().GetFloat64("priority-factor") if err != nil { - return fmt.Errorf("error reading priorityFactor: %s", err) + return fmt.Errorf("error reading priority-factor: %s", err) } owners, err := cmd.Flags().GetStringSlice("owners") @@ -136,9 +136,9 @@ func queueUpdateCmdWithApp(a *armadactl.App) *cobra.Command { return fmt.Errorf("error reading owners: %s", err) } - groups, err := cmd.Flags().GetStringSlice("groupOwners") + groups, err := cmd.Flags().GetStringSlice("group-owners") if err != nil { - return fmt.Errorf("error reading groupOwners: %s", err) + return fmt.Errorf("error reading group-owners: %s", err) } queue, err := queue.NewQueue(&api.Queue{ @@ -155,8 +155,8 @@ func queueUpdateCmdWithApp(a *armadactl.App) *cobra.Command { }, } // TODO this will overwrite existing values with default values if not all flags are provided - cmd.Flags().Float64("priorityFactor", 1, "Set queue priority factor - lower number makes queue more important, must be > 0.") + cmd.Flags().Float64("priority-factor", 1, "Set queue priority factor - lower number makes queue more important, must be > 0.") cmd.Flags().StringSlice("owners", []string{}, "Comma separated list of queue owners, defaults to current user.") - cmd.Flags().StringSlice("groupOwners", []string{}, "Comma separated list of queue group owners, defaults to empty list.") + cmd.Flags().StringSlice("group-owners", []string{}, "Comma separated list of queue group owners, defaults to empty list.") return cmd } diff --git a/cmd/armadactl/cmd/queue_test.go b/cmd/armadactl/cmd/queue_test.go index eab9ad14431..4dcb16a0a7b 100644 --- a/cmd/armadactl/cmd/queue_test.go +++ b/cmd/armadactl/cmd/queue_test.go @@ -42,9 +42,9 @@ func TestCreate(t *testing.T) { GroupOwners []string }{ "default flags": {nil, nil, nil, nil}, - "valid priority": {[]flag{{"priorityFactor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil}, + "valid priority": {[]flag{{"priority-factor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil}, "valid owners": {[]flag{{"owners", "user1,user2"}}, nil, []string{"user1", "user2"}, nil}, - "valid group owners": {[]flag{{"groupOwners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}}, + "valid group owners": {[]flag{{"group-owners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}}, } for name, test := range tests { @@ -122,9 +122,9 @@ func TestUpdate(t *testing.T) { GroupOwners []string }{ "default flags": {nil, nil, nil, nil}, - "valid priority": {[]flag{{"priorityFactor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil}, + "valid priority": {[]flag{{"priority-factor", "1.0"}}, 
makeFloat64Pointer(1.0), nil, nil}, "valid owners": {[]flag{{"owners", "user1,user2"}}, nil, []string{"user1", "user2"}, nil}, - "valid group owners": {[]flag{{"groupOwners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}}, + "valid group owners": {[]flag{{"group-owners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}}, } for name, test := range tests { diff --git a/cmd/armadactl/cmd/reprioritize.go b/cmd/armadactl/cmd/reprioritize.go index c43b6c7ed53..9264d342438 100644 --- a/cmd/armadactl/cmd/reprioritize.go +++ b/cmd/armadactl/cmd/reprioritize.go @@ -10,42 +10,66 @@ import ( ) func reprioritizeCmd() *cobra.Command { - a := armadactl.New() cmd := &cobra.Command{ - Use: "reprioritize ", + Use: "reprioritize", Short: "Reprioritize jobs in Armada", - Long: `Change the priority of a single or multiple jobs by specifying either a job id or a combination of queue & job set.`, - Args: cobra.ExactArgs(1), + Long: `Change the priority of a single job or entire job-set. Supported: job, job-set`, + } + cmd.AddCommand( + reprioritizeJobCmd(), + reprioritizeJobSetCmd(), + ) + + return cmd +} + +func reprioritizeJobCmd() *cobra.Command { + a := armadactl.New() + cmd := &cobra.Command{ + Use: "job ", + Short: `Change the priority of a single job.`, + Args: cobra.ExactArgs(4), PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) }, RunE: func(cmd *cobra.Command, args []string) error { - priorityString := args[0] + // Ignoring first two arguments until Server API change makes queue and job-set a requirement + queue := args[0] + jobSet := args[1] + jobId := args[2] + priorityString := args[3] priorityFactor, err := strconv.ParseFloat(priorityString, 64) if err != nil { return fmt.Errorf("error converting %s to float64: %s", priorityString, err) } - jobId, err := cmd.Flags().GetString("jobId") - if err != nil { - return fmt.Errorf("error reading jobId: %s", err) - } + return a.ReprioritizeJob(queue, jobSet, jobId, priorityFactor) + }, + } + return cmd +} - queueName, err := cmd.Flags().GetString("queue") - if err != nil { - return fmt.Errorf("error reading queueName: %s", err) - } +func reprioritizeJobSetCmd() *cobra.Command { + a := armadactl.New() + cmd := &cobra.Command{ + Use: "job-set ", + Short: `Change the priority of an entire job set.`, + Args: cobra.ExactArgs(3), + PreRunE: func(cmd *cobra.Command, args []string) error { + return initParams(cmd, a.Params) + }, + RunE: func(cmd *cobra.Command, args []string) error { + queue := args[0] + jobSet := args[1] - jobSetId, err := cmd.Flags().GetString("jobSet") + priorityString := args[2] + priorityFactor, err := strconv.ParseFloat(priorityString, 64) if err != nil { - return fmt.Errorf("error reading jobSet: %s", err) + return fmt.Errorf("error converting %s to float64: %s", priorityString, err) } - return a.Reprioritize(jobId, queueName, jobSetId, priorityFactor) + return a.ReprioritizeJobSet(queue, jobSet, priorityFactor) }, } - cmd.Flags().String("jobId", "", "Job to reprioritize") - cmd.Flags().String("queue", "", "Queue including jobs to be reprioritized (requires job set to be specified)") - cmd.Flags().String("jobSet", "", "Job set including jobs to be reprioritized (requires queue to be specified)") return cmd } diff --git a/cmd/armadactl/cmd/resources.go b/cmd/armadactl/cmd/resources.go deleted file mode 100644 index 77faeff2ea3..00000000000 --- a/cmd/armadactl/cmd/resources.go +++ /dev/null @@ -1,26 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - 
"github.com/armadaproject/armada/internal/armadactl" -) - -func resourcesCmd() *cobra.Command { - a := armadactl.New() - cmd := &cobra.Command{ - Use: "resources ", - Short: "Prints out maximum resource usage for individual jobs.", - Long: `Prints out maximum resource usage for individual jobs in job set.`, - Args: cobra.ExactArgs(2), - PreRunE: func(cmd *cobra.Command, args []string) error { - return initParams(cmd, a.Params) - }, - RunE: func(cmd *cobra.Command, args []string) error { - queueName := args[0] - jobSetId := args[1] - return a.Resources(queueName, jobSetId) - }, - } - return cmd -} diff --git a/cmd/armadactl/cmd/resources/README.md b/cmd/armadactl/cmd/resources/README.md new file mode 100644 index 00000000000..0ad5ab766a9 --- /dev/null +++ b/cmd/armadactl/cmd/resources/README.md @@ -0,0 +1,126 @@ +# Armadactl + +Armadactl is a command-line tool used for managing jobs in the Armada workload orchestration system. It provides functionality for creating, updating, and deleting jobs, as well as monitoring job status and resource usage. + +## Armadactl configuration +Armadactl config files are structured as follows: +```yaml +currentContext: main # Default context to be used by Armadactl +contexts: + main: + armadaUrl: + execAuth: + cmd: + args: + - + test: + armadaUrl: + execAuth: + cmd: + args: + - +``` + +By default, armadactl assumes that a configuration file exists at `$HOME/.armadactl.yaml`. You can provide your own +config file by specifying `--config $CONFIG_FILE_PATH` when running armadactl. + +We also support a legacy armadactl config structure, although this will soon be deprecated: +```yaml +armadaUrl: +execAuth: + cmd: + args: + - +``` + +Under both structures, BasicAuth and various oidc auth methods are also supported. + +## Usage +Once Armadactl is successfully installed, you can use it to execute Armada subcommands by running the following command: +```bash +armadactl [subcommand] [flags] +``` + +### Available subcommands: +- **config** : The config subcommand enables users to configure how they interact with multiple Armada instances, which we refer to as _contexts_. The subcommand allows users to view, get and set contexts. Contexts are not supported with the legacy armadactl configuration. + - **use-context** : Sets the default context for future armadactl commands + ```bash + armadactl config use-context + ``` + - **get-contexts** : Retrieves all contexts from the current armadactl configuration. If no configuration has been specified, armadactl defaults to `$HOME/.armadactl.yaml` + ```bash + armadactl config get-contexts + ``` + - **current-context** : Retrieves the context which is currently set as the default. + ```bash + armadactl config current-context + ``` +- **preempt** : The preempt subcommand can be used to preempt running Armada resources. + - **job** : Preempts an individual job. + ```bash + armadactl preempt job [flags] + ``` +- **cancel** : The cancel subcommand can be used to cancel running Armada resources. + - **job** : Cancels an individual job. + ```bash + armadactl cancel job [flags] + ``` + - **job-set** : Cancels all jobs within a job-set. + ```bash + armadactl cancel job-set [flags] + ``` +- **create** : The create subcommand can be used to create a new Armada resource. + - **queue** : Allows users to create a queue. + ```bash + armadactl create queue [flags] + ``` +- **delete** : The delete subcommand can be used to delete an existing Armada resource. + - **queue** : Allows users to delete a given queue. 
+    ```bash
+    armadactl delete queue <queue-name> [flags]
+    ```
+- **update** : The update subcommand can be used to update an existing Armada resource.
+  - **queue** : Allows users to update queue priority, owners or group-owners of a given queue.
+    ```bash
+    armadactl update queue <queue-name> [flags]
+    ```
+- **reprioritize** : The reprioritize subcommand can be used to change the priority of a running Armada resource.
+  - **job** : Allows users to change the priority of an individual job.
+    ```bash
+    armadactl reprioritize job <queue> <job-set> <job-id> <priority> [flags]
+    ```
+  - **job-set** : Allows users to change the priority of all jobs in a job-set.
+    ```bash
+    armadactl reprioritize job-set <queue> <job-set> <priority> [flags]
+    ```
+- **submit** : The submit subcommand can be used to submit a set of manifests to an existing Armada deployment.
+```bash
+armadactl submit [flags]
+```
+- **version** : The version subcommand can be used to get the version of Armada that is currently installed.
+```bash
+armadactl version [flags]
+```
+- **watch** : The watch subcommand can be used to watch events for jobs in a given queue and job-set.
+```bash
+armadactl watch <queue> <job-set> [flags]
+```
+- **get** : Allows users to retrieve more information about Armada resources.
+  - **queue** : This subcommand retrieves information about a given queue.
+    ```bash
+    armadactl get queue <queue-name>
+    ```
+  - **queue-report** : This subcommand retrieves a report of the current scheduling status of all queues in the Armada cluster.
+    ```bash
+    armadactl get queue-report <queue-name>
+    ```
+  - **job-report** : This subcommand retrieves a report of the current scheduling status of all jobs in the Armada cluster.
+    ```bash
+    armadactl get job-report <job-id>
+    ```
+  - **scheduling-report** : This subcommand retrieves a report of the current scheduling status in the Armada cluster.
+    ```bash
+    armadactl get scheduling-report
+    ```
+
+For a full list of subcommands and options, you can run **armadactl --help**.
diff --git a/cmd/armadactl/cmd/resources/glamourStyle.json b/cmd/armadactl/cmd/resources/glamourStyle.json
new file mode 100644
index 00000000000..317ba28e45f
--- /dev/null
+++ b/cmd/armadactl/cmd/resources/glamourStyle.json
@@ -0,0 +1,192 @@
+{
+  "document": {
+    "block_prefix": "\n",
+    "block_suffix": "\n",
+    "color": "252",
+    "margin": 2
+  },
+  "block_quote": {
+    "indent": 1,
+    "indent_token": "│ "
+  },
+  "paragraph": {},
+  "list": {
+    "level_indent": 2
+  },
+  "heading": {
+    "block_suffix": "\n",
+    "color": "39",
+    "bold": true
+  },
+  "h1": {
+    "prefix": " ",
+    "suffix": " ",
+    "color": "228",
+    "background_color": "63",
+    "bold": true
+  },
+  "h2": {
+    "prefix": "## "
+  },
+  "h3": {
+    "prefix": "### "
+  },
+  "h4": {
+    "prefix": "#### "
+  },
+  "h5": {
+    "prefix": "##### "
+  },
+  "h6": {
+    "prefix": "###### ",
+    "color": "35",
+    "bold": false
+  },
+  "text": {},
+  "strikethrough": {
+    "crossed_out": true
+  },
+  "emph": {
+    "italic": true
+  },
+  "strong": {
+    "bold": true
+  },
+  "hr": {
+    "color": "240",
+    "format": "\n--------\n"
+  },
+  "item": {
+    "block_prefix": "• "
+  },
+  "enumeration": {
+    "block_prefix": ". 
" + }, + "task": { + "ticked": "[✓] ", + "unticked": "[ ] " + }, + "link": { + "color": "30", + "underline": true + }, + "link_text": { + "color": "35", + "bold": true + }, + "image": { + "color": "212", + "underline": true + }, + "image_text": { + "color": "243", + "format": "Image: {{.text}} →" + }, + "code": { + "prefix": " ", + "suffix": " ", + "color": "203", + "background_color": "236" + }, + "code_block": { + "color": "244", + "margin": 0, + "chroma": { + "text": { + "color": "#FFFFFF", + "background_color": "#3B3B3B" + }, + "error": { + "color": "#F1F1F1" + }, + "comment": { + "color": "#676767" + }, + "comment_preproc": { + "color": "#FF875F" + }, + "keyword": { + "color": "#00AAFF" + }, + "keyword_reserved": { + "color": "#FF5FD2" + }, + "keyword_namespace": { + "color": "#FF5F87" + }, + "keyword_type": { + "color": "#6E6ED8" + }, + "operator": { + "color": "#EF8080" + }, + "punctuation": { + "color": "#E8E8A8" + }, + "name": { + "color": "#C4C4C4" + }, + "name_builtin": { + "color": "#FF8EC7" + }, + "name_tag": { + "color": "#B083EA" + }, + "name_attribute": { + "color": "#7A7AE6" + }, + "name_class": { + "color": "#F1F1F1", + "underline": true, + "bold": true + }, + "name_constant": {}, + "name_decorator": { + "color": "#FFFF87" + }, + "name_exception": {}, + "name_function": { + "color": "#00D787" + }, + "name_other": {}, + "literal": {}, + "literal_number": { + "color": "#6EEFC0" + }, + "literal_date": {}, + "literal_string": { + "color": "#C69669" + }, + "literal_string_escape": { + "color": "#AFFFD7" + }, + "generic_deleted": { + "color": "#FD5B5B" + }, + "generic_emph": { + "italic": true + }, + "generic_inserted": { + "color": "#00D787" + }, + "generic_strong": { + "bold": true + }, + "generic_subheading": { + "color": "#777777" + } + } + }, + "table": { + "center_separator": "┼", + "column_separator": "│", + "row_separator": "─" + }, + "definition_list": {}, + "definition_term": {}, + "definition_description": { + "block_prefix": "\n🠶 " + }, + "html_block": {}, + "html_span": {} +} diff --git a/cmd/armadactl/cmd/root.go b/cmd/armadactl/cmd/root.go index 1e6eacfdb13..faaac5e56ff 100644 --- a/cmd/armadactl/cmd/root.go +++ b/cmd/armadactl/cmd/root.go @@ -13,39 +13,23 @@ func RootCmd() *cobra.Command { cmd := &cobra.Command{ Use: "armadactl", Short: "armadactl controls the Armada batch job queueing system.", - Long: `armadactl controls the Armada batch job queueing system. - -Persistent config can be saved in a config file so it doesn't have to be specified every command. 
- -Example structure: -armadaUrl: localhost:50051 -basicAuth: -username: user1 -password: password123 - -The location of this file can be passed in using --config argument or picked from $HOME/.armadactl.yaml.`, } client.AddArmadaApiConnectionCommandlineArgs(cmd) cmd.AddCommand( - analyzeCmd(), cancelCmd(), createCmd(armadactl.New()), deleteCmd(), updateCmd(), getCmd(), - kubeCmd(), reprioritizeCmd(), - resourcesCmd(), submitCmd(), versionCmd(), watchCmd(), - getSchedulingReportCmd(armadactl.New()), - getQueueSchedulingReportCmd(armadactl.New()), - getJobSchedulingReportCmd(armadactl.New()), configCmd(armadactl.New()), preemptCmd(), + docsCmd(), ) return cmd diff --git a/cmd/armadactl/cmd/scheduling.go b/cmd/armadactl/cmd/scheduling.go index aeaf524dfc3..74d893d861b 100644 --- a/cmd/armadactl/cmd/scheduling.go +++ b/cmd/armadactl/cmd/scheduling.go @@ -32,7 +32,7 @@ func getSchedulingReportCmd(a *armadactl.App) *cobra.Command { return a.GetSchedulingReportForQueue(queueName, int32(verbosity)) } - jobId, err := cmd.Flags().GetString("job") + jobId, err := cmd.Flags().GetString("job-id") if err != nil { return err } @@ -47,18 +47,18 @@ func getSchedulingReportCmd(a *armadactl.App) *cobra.Command { cmd.Flags().CountP("verbose", "v", "report verbosity; repeat (e.g., -vvv) to increase verbosity") - cmd.Flags().String("queue", "", "get scheduler reports relevant for this queue; mutually exclusive with --job") - cmd.Flags().String("job", "", "get scheduler reports relevant for this job; mutually exclusive with --queue") - cmd.MarkFlagsMutuallyExclusive("queue", "job") + cmd.Flags().String("queue", "", "get scheduler reports relevant for this queue; mutually exclusive with --job-id") + cmd.Flags().String("job-id", "", "get scheduler reports relevant for this job; mutually exclusive with --queue") + cmd.MarkFlagsMutuallyExclusive("queue", "job-id") return cmd } func getQueueSchedulingReportCmd(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ - Use: "queue-report", + Use: "queue-report ", Short: "Get queue scheduler reports", - Args: cobra.ExactArgs(0), + Args: cobra.ExactArgs(1), SilenceUsage: true, PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) @@ -69,10 +69,7 @@ func getQueueSchedulingReportCmd(a *armadactl.App) *cobra.Command { return err } - queueName, err := cmd.Flags().GetString("queue") - if err != nil { - return err - } + queueName := args[0] queueName = strings.TrimSpace(queueName) return a.GetQueueSchedulingReport(queueName, int32(verbosity)) @@ -81,29 +78,23 @@ func getQueueSchedulingReportCmd(a *armadactl.App) *cobra.Command { cmd.Flags().CountP("verbose", "v", "report verbosity; repeat (e.g., -vvv) to increase verbosity") - cmd.Flags().String("queue", "", "Queue name to query reports for.") - return cmd } func getJobSchedulingReportCmd(a *armadactl.App) *cobra.Command { cmd := &cobra.Command{ - Use: "job-report", + Use: "job-report ", Short: "Get job scheduler reports", - Args: cobra.ExactArgs(0), + Args: cobra.ExactArgs(1), SilenceUsage: true, PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) }, RunE: func(cmd *cobra.Command, args []string) error { - jobId, err := cmd.Flags().GetString("jobId") - if err != nil { - return err - } + jobId := args[0] jobId = strings.TrimSpace(jobId) return a.GetJobSchedulingReport(jobId) }, } - cmd.Flags().String("jobId", "", "Id of job to query reports for.") return cmd } diff --git a/cmd/armadactl/cmd/watch.go b/cmd/armadactl/cmd/watch.go index 
ff9f7c0fc07..aeead01aa78 100644 --- a/cmd/armadactl/cmd/watch.go +++ b/cmd/armadactl/cmd/watch.go @@ -11,9 +11,9 @@ import ( func watchCmd() *cobra.Command { a := armadactl.New() cmd := &cobra.Command{ - Use: "watch ", + Use: "watch ", Short: "Watch job events in job set.", - Long: "Listens for and prints events associated with a particular queue and jobset.", + Long: "Listens for and prints events associated with a particular queue and job-set.", Args: cobra.ExactArgs(2), PreRunE: func(cmd *cobra.Command, args []string) error { return initParams(cmd, a.Params) diff --git a/cmd/eventsprinter/cmd/root.go b/cmd/eventsprinter/cmd/root.go deleted file mode 100644 index 2077c7611de..00000000000 --- a/cmd/eventsprinter/cmd/root.go +++ /dev/null @@ -1,41 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/armadaproject/armada/cmd/eventsprinter/logic" -) - -// RootCmd is the root Cobra command that gets called from the main func. -// All other sub-commands should be registered here. -func RootCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "eventsprinter", - Short: "eventsprinter prints all Pulsar events", - RunE: func(cmd *cobra.Command, args []string) error { - url, err := cmd.PersistentFlags().GetString("url") - if err != nil { - return err - } - verbose, err := cmd.PersistentFlags().GetBool("verbose") - if err != nil { - return err - } - subscription, err := cmd.PersistentFlags().GetString("subscription") - if err != nil { - return err - } - topic, err := cmd.PersistentFlags().GetString("topic") - if err != nil { - return err - } - return logic.PrintEvents(url, topic, subscription, verbose) - }, - } - cmd.PersistentFlags().String("url", "pulsar://localhost:6650", "URL to connect to Pulsar on.") - cmd.PersistentFlags().Bool("verbose", false, "Print full event sequences.") - cmd.PersistentFlags().String("subscription", "eventsprinter", "Subscription to connect to Pulsar on.") - cmd.PersistentFlags().String("topic", "events", "Pulsar topic to subscribe to.") - - return cmd -} diff --git a/cmd/eventsprinter/logic/logic.go b/cmd/eventsprinter/logic/logic.go deleted file mode 100644 index b7a9dab8ea7..00000000000 --- a/cmd/eventsprinter/logic/logic.go +++ /dev/null @@ -1,229 +0,0 @@ -package logic - -import ( - "fmt" - "time" - - "github.com/apache/pulsar-client-go/pulsar" - "github.com/gogo/protobuf/proto" - v1 "k8s.io/api/core/v1" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/pkg/armadaevents" -) - -func PrintEvents(url, topic, subscription string, verbose bool) error { - fmt.Println("Subscribing to Pulsar events") - fmt.Println("URL:", url) - fmt.Println("Topic:", topic) - fmt.Println("Subscription", subscription) - return withSetup(url, topic, subscription, func(ctx *armadacontext.Context, producer pulsar.Producer, consumer pulsar.Consumer) error { - // Number of active jobs. - numJobs := 0 - - // Time at which numJobs most recently changed from 0 to 1 and from 1 to 0, respectively. 
- risingEdge := time.Now() - - for { - msg, err := consumer.Receive(ctx) - if err != nil { - fmt.Println(err) - time.Sleep(time.Second) - continue - } - - util.RetryUntilSuccess( - ctx, - func() error { return consumer.Ack(msg) }, - func(err error) { - fmt.Println(err) - time.Sleep(time.Second) - }, - ) - - sequence := &armadaevents.EventSequence{} - err = proto.Unmarshal(msg.Payload(), sequence) - if err != nil { - fmt.Println(err) - continue - } - - // Skip sequences with no events. - if len(sequence.Events) == 0 { - continue - } - - // Count number of active jobs - for _, event := range sequence.Events { - if isSubmitJob(event) { - if numJobs == 0 { - risingEdge = time.Now() - } - numJobs++ - } else if isJobFailed(event) { - numJobs-- - } else if isJobSucceeded(event) { - numJobs-- - } - } - - if !verbose { - fmt.Printf("> EventSequence w. %d events (%d jobs active, for %s)\n", - len(sequence.Events), numJobs, time.Since(risingEdge)) - fmt.Printf(" (Queue: %s, JobSetName: %s, UserId: %s, Groups: %v)\n", - sequence.GetQueue(), sequence.GetJobSetName(), sequence.GetUserId(), sequence.GetGroups()) - for _, event := range sequence.Events { - // On error, we print an empty id. - jobId, _ := armadaevents.JobIdFromEvent(event) - fmt.Printf("\t%T (job %s)\n", event.Event, armadaevents.UlidFromProtoUuid(jobId)) - - if submitJob, ok := event.Event.(*armadaevents.EventSequence_Event_SubmitJob); ok { - mainObject := submitJob.SubmitJob.GetMainObject() - fmt.Printf("\t\tMainObject: %T\n", mainObject.Object) - fmt.Printf("\t\t\tObjectMeta: %v\n", mainObject.GetObjectMeta()) - if mainObject, ok := (submitJob.SubmitJob.GetMainObject().Object).(*armadaevents.KubernetesMainObject_PodSpec); ok { - fmt.Printf("\t\t\tTolerations: %v\n", mainObject.PodSpec.GetPodSpec().Tolerations) - } - for i, object := range submitJob.SubmitJob.GetObjects() { - fmt.Printf("\t\tObject %d: %T\n", i, object.GetObject()) - fmt.Printf("\t\t\tObjectMeta: %v\n", object.GetObjectMeta()) - } - } else if duplicateDetected, ok := event.Event.(*armadaevents.EventSequence_Event_JobDuplicateDetected); ok { - newId, err := armadaevents.UlidStringFromProtoUuid(duplicateDetected.JobDuplicateDetected.NewJobId) - if err != nil { - panic(err) - } - oldId, err := armadaevents.UlidStringFromProtoUuid(duplicateDetected.JobDuplicateDetected.OldJobId) - if err != nil { - panic(err) - } - fmt.Printf("\t\tNew job %s is a duplicate of existing job %s\n", newId, oldId) - } else if jobRunErrors, ok := event.Event.(*armadaevents.EventSequence_Event_JobRunErrors); ok { - for _, e := range jobRunErrors.JobRunErrors.Errors { - fmt.Printf("\t\t%T\n", e.Reason) - } - } else if jobErrors, ok := event.Event.(*armadaevents.EventSequence_Event_JobErrors); ok { - for _, e := range jobErrors.JobErrors.Errors { - fmt.Printf("\t\t%T\n", e.Reason) - } - } - } - } else { - // Remove fields from PodSpecs that result in panics when printing. - for _, event := range sequence.Events { - stripPodSpecsInEvent(event) - } - // TODO: This results in panics when there are tolerations in the podspec. - fmt.Printf("> EventSequence w. 
%d events (%d jobs active, for %s)\n%s\n", - len(sequence.Events), numJobs, time.Since(risingEdge), - proto.MarshalTextString(sequence)) - } - } - return nil - }) -} - -func isSubmitJob(e *armadaevents.EventSequence_Event) bool { - _, ok := (e.Event).(*armadaevents.EventSequence_Event_SubmitJob) - return ok -} - -func isJobFailed(e *armadaevents.EventSequence_Event) bool { - if m, ok := (e.Event).(*armadaevents.EventSequence_Event_JobRunErrors); ok { - for _, err := range m.JobRunErrors.Errors { - if err.Terminal { - return true - } - } - } - if m, ok := (e.Event).(*armadaevents.EventSequence_Event_JobErrors); ok { - for _, err := range m.JobErrors.Errors { - if err.Terminal { - return true - } - } - } - if _, ok := (e.Event).(*armadaevents.EventSequence_Event_CancelledJob); ok { - return true - } - return false -} - -func isJobSucceeded(e *armadaevents.EventSequence_Event) bool { - _, ok := (e.Event).(*armadaevents.EventSequence_Event_JobSucceeded) - return ok -} - -func stripPodSpecsInEvent(event *armadaevents.EventSequence_Event) { - submitJob, ok := (event.Event).(*armadaevents.EventSequence_Event_SubmitJob) - if ok { - if podSpec, ok := (submitJob.SubmitJob.MainObject.Object).(*armadaevents.KubernetesMainObject_PodSpec); ok { - podSpec.PodSpec.PodSpec = stripPodSpec(podSpec.PodSpec.PodSpec) - } - } -} - -// stripPodSpec returns a new PodSpec with the Resources field zeroed out, -// which we remove because it contains private values, which cause a panic when printed. -func stripPodSpec(spec *v1.PodSpec) *v1.PodSpec { - containers := make([]v1.Container, len(spec.Containers), len(spec.Containers)) - for i, container := range spec.Containers { - containers[i] = v1.Container{ - Name: container.Name, - Image: container.Image, - Command: container.Command, - Args: container.Args, - WorkingDir: container.WorkingDir, - Ports: container.Ports, - EnvFrom: container.EnvFrom, - Env: container.Env, - Resources: v1.ResourceRequirements{}, // This is the problem - VolumeMounts: container.VolumeMounts, - VolumeDevices: container.VolumeDevices, - LivenessProbe: container.LivenessProbe, - ReadinessProbe: container.ReadinessProbe, - StartupProbe: container.StartupProbe, - Lifecycle: container.Lifecycle, - TerminationMessagePath: container.TerminationMessagePath, - TerminationMessagePolicy: container.TerminationMessagePolicy, - ImagePullPolicy: container.ImagePullPolicy, - SecurityContext: container.SecurityContext, - Stdin: container.Stdin, - StdinOnce: container.StdinOnce, - TTY: container.TTY, - } - } - spec.Containers = containers - return spec -} - -// Run action with an Armada submit client and a Pulsar producer and consumer. 
-func withSetup(url, topic, subscription string, action func(ctx *armadacontext.Context, producer pulsar.Producer, consumer pulsar.Consumer) error) error { - pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{ - URL: url, - }) - if err != nil { - return err - } - defer pulsarClient.Close() - - producer, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{ - Topic: topic, - }) - if err != nil { - return err - } - defer producer.Close() - - consumer, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{ - Topic: topic, - SubscriptionName: subscription, - }) - if err != nil { - return err - } - defer consumer.Close() - - return action(armadacontext.Background(), producer, consumer) -} diff --git a/cmd/eventsprinter/main.go b/cmd/eventsprinter/main.go deleted file mode 100644 index fd5aa8b5f3e..00000000000 --- a/cmd/eventsprinter/main.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/armadaproject/armada/cmd/eventsprinter/cmd" -) - -func main() { - root := cmd.RootCmd() - if err := root.Execute(); err != nil { - fmt.Println(err) - } -} diff --git a/cmd/executor/main.go b/cmd/executor/main.go index b8ece6dfe44..7090b712263 100644 --- a/cmd/executor/main.go +++ b/cmd/executor/main.go @@ -68,7 +68,7 @@ func main() { ) defer shutdownMetricServer() - shutdown, wg := executor.StartUp(armadacontext.Background(), logrus.NewEntry(logrus.New()), config) + shutdown, wg := executor.StartUp(armadacontext.Background(), logrus.NewEntry(logrus.StandardLogger()), config) go func() { <-shutdownChannel shutdown() diff --git a/cmd/lookoutv2/main.go b/cmd/lookoutv2/main.go index 107335bc8ae..83d53913e17 100644 --- a/cmd/lookoutv2/main.go +++ b/cmd/lookoutv2/main.go @@ -8,8 +8,9 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/spf13/viper" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" + armada_config "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" @@ -79,7 +80,14 @@ func migrate(ctx *armadacontext.Context, config configuration.LookoutV2Config) { } func prune(ctx *armadacontext.Context, config configuration.LookoutV2Config) { - db, err := database.OpenPgxConn(config.Postgres) + var dbConfig armada_config.PostgresConfig + if config.PrunerConfig.Postgres.Connection != nil { + dbConfig = config.PrunerConfig.Postgres + } else { + dbConfig = config.Postgres + } + + db, err := database.OpenPgxConn(dbConfig) if err != nil { panic(err) } @@ -98,7 +106,13 @@ func prune(ctx *armadacontext.Context, config configuration.LookoutV2Config) { ctxTimeout, cancel := armadacontext.WithTimeout(ctx, config.PrunerConfig.Timeout) defer cancel() - err = pruner.PruneDb(ctxTimeout, db, config.PrunerConfig.ExpireAfter, config.PrunerConfig.BatchSize, clock.RealClock{}) + err = pruner.PruneDb( + ctxTimeout, + db, + config.PrunerConfig.ExpireAfter, + config.PrunerConfig.DeduplicationExpireAfter, + config.PrunerConfig.BatchSize, + clock.RealClock{}) if err != nil { panic(err) } diff --git a/cmd/pulsartest/cmd/root.go b/cmd/pulsartest/cmd/root.go deleted file mode 100644 index 6ba13eca01d..00000000000 --- a/cmd/pulsartest/cmd/root.go +++ /dev/null @@ -1,28 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -// RootCmd is the root Cobra command that gets called from the main func. -// All other sub-commands should be registered here. 
-func RootCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "pulsartest", - Short: "pulsartest provides tools for generating and observing Pulsar events.", - } - - cmd.PersistentFlags().String("url", "pulsar://localhost:6650", "URL to connect to Pulsar on.") - cmd.PersistentFlags().Bool("authenticationEnabled", false, "Use authentication.") - cmd.PersistentFlags().String("authenticationType", "JWT", "Authentication type") - cmd.PersistentFlags().String("jwtTokenPath", "", "Path of JWT file") - cmd.PersistentFlags().String("jobsetEventsTopic", "events", "Pulsar topic for this jobset") - cmd.PersistentFlags().String("compressionType", "none", "Type of compression to use") - - cmd.AddCommand( - submitCmd(), - watchCmd(), - ) - - return cmd -} diff --git a/cmd/pulsartest/cmd/submit.go b/cmd/pulsartest/cmd/submit.go deleted file mode 100644 index 955396f7041..00000000000 --- a/cmd/pulsartest/cmd/submit.go +++ /dev/null @@ -1,43 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/pulsartest" -) - -func submitCmd() *cobra.Command { - a := &pulsartest.App{} - - cmd := &cobra.Command{ - Use: "submit ./path/to/events.yaml", - Short: "Submit events to Pulsar", - Long: "Submit events to Pulsar from file.", - Args: cobra.ExactArgs(1), - PreRunE: func(cmd *cobra.Command, args []string) error { - flags, err := processCmdFlags(cmd.Flags()) - if err != nil { - return err - } - - params := pulsartest.Params{ - Pulsar: configuration.PulsarConfig{ - URL: flags.url, - AuthenticationEnabled: flags.authEnable, - AuthenticationType: flags.authType, - JwtTokenPath: flags.jwtPath, - JobsetEventsTopic: flags.topic, - }, - } - a, err = pulsartest.New(params, "submit") - return err - }, - RunE: func(cmd *cobra.Command, args []string) error { - path := args[0] - return a.Submit(path) - }, - } - - return cmd -} diff --git a/cmd/pulsartest/cmd/util.go b/cmd/pulsartest/cmd/util.go deleted file mode 100644 index e83e3423d23..00000000000 --- a/cmd/pulsartest/cmd/util.go +++ /dev/null @@ -1,44 +0,0 @@ -package cmd - -import ( - "github.com/spf13/pflag" -) - -type connFlags struct { - url string - authEnable bool - authType string - jwtPath string - topic string -} - -func processCmdFlags(flags *pflag.FlagSet) (*connFlags, error) { - url, err := flags.GetString("url") - if err != nil { - return nil, err - } - authEnable, err := flags.GetBool("authenticationEnabled") - if err != nil { - return nil, err - } - authType, err := flags.GetString("authenticationType") - if err != nil { - return nil, err - } - jwtPath, err := flags.GetString("jwtTokenPath") - if err != nil { - return nil, err - } - jsTopic, err := flags.GetString("jobsetEventsTopic") - if err != nil { - return nil, err - } - - return &connFlags{ - url: url, - authEnable: authEnable, - authType: authType, - jwtPath: jwtPath, - topic: jsTopic, - }, nil -} diff --git a/cmd/pulsartest/cmd/watch.go b/cmd/pulsartest/cmd/watch.go deleted file mode 100644 index 09e5eda8fe3..00000000000 --- a/cmd/pulsartest/cmd/watch.go +++ /dev/null @@ -1,42 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/pulsartest" -) - -func watchCmd() *cobra.Command { - a := &pulsartest.App{} - - cmd := &cobra.Command{ - Use: "watch", - Short: "Watch for Pulsar events", - Long: "Watch for Pulsar events", - Args: cobra.ExactArgs(0), - PreRunE: func(cmd 
*cobra.Command, args []string) error { - flags, err := processCmdFlags(cmd.Flags()) - if err != nil { - return err - } - - params := pulsartest.Params{ - Pulsar: configuration.PulsarConfig{ - URL: flags.url, - AuthenticationEnabled: flags.authEnable, - AuthenticationType: flags.authType, - JwtTokenPath: flags.jwtPath, - JobsetEventsTopic: flags.topic, - }, - } - a, err = pulsartest.New(params, "watch") - return err - }, - RunE: func(cmd *cobra.Command, args []string) error { - return a.Watch() - }, - } - - return cmd -} diff --git a/cmd/pulsartest/main.go b/cmd/pulsartest/main.go deleted file mode 100644 index 935ec3fa161..00000000000 --- a/cmd/pulsartest/main.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - log "github.com/sirupsen/logrus" - - "github.com/armadaproject/armada/cmd/pulsartest/cmd" - "github.com/armadaproject/armada/internal/common" -) - -// Config is handled by cmd/params.go -func main() { - common.ConfigureCommandLineLogging() - root := cmd.RootCmd() - if err := root.Execute(); err != nil { - log.Fatal(err) - } -} diff --git a/cmd/scheduler/cmd/prune_database.go b/cmd/scheduler/cmd/prune_database.go index 4ed7aee426e..8b7711d0ace 100644 --- a/cmd/scheduler/cmd/prune_database.go +++ b/cmd/scheduler/cmd/prune_database.go @@ -5,7 +5,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" diff --git a/cmd/simulator/cmd/root.go b/cmd/simulator/cmd/root.go index a3710f3bfab..165b12f2876 100644 --- a/cmd/simulator/cmd/root.go +++ b/cmd/simulator/cmd/root.go @@ -1,15 +1,16 @@ package cmd import ( + "math" "os" "github.com/pkg/errors" "github.com/spf13/cobra" "golang.org/x/exp/maps" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/common/slices" + "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/internal/scheduler/simulator" "github.com/armadaproject/armada/internal/scheduler/testfixtures" ) @@ -27,6 +28,9 @@ func RootCmd() *cobra.Command { cmd.Flags().Bool("showSchedulerLogs", false, "Show scheduler logs.") cmd.Flags().Int("logInterval", 0, "Log summary statistics every this many events. Disabled if 0.") cmd.Flags().String("eventsOutputFilePath", "", "Path of file to write events to.") + cmd.Flags().Bool("enableFastForward", false, "Skips schedule events when we're in a steady state") + cmd.Flags().Int("hardTerminationMinutes", math.MaxInt, "Limit the time simulated") + cmd.Flags().Int("schedulerCyclePeriodSeconds", 10, "How often we should trigger schedule events") return cmd } @@ -57,6 +61,19 @@ func runSimulations(cmd *cobra.Command, args []string) error { return err } + enableFastForward, err := cmd.Flags().GetBool("enableFastForward") + if err != nil { + return err + } + hardTerminationMinutes, err := cmd.Flags().GetInt("hardTerminationMinutes") + if err != nil { + return err + } + schedulerCyclePeriodSeconds, err := cmd.Flags().GetInt("schedulerCyclePeriodSeconds") + if err != nil { + return err + } + // Load test specs. and config. 
clusterSpecs, err := simulator.ClusterSpecsFromPattern(clusterPattern) if err != nil { @@ -84,8 +101,8 @@ func runSimulations(cmd *cobra.Command, args []string) error { ctx := armadacontext.Background() ctx.Info("Armada simulator") - ctx.Infof("ClusterSpecs: %v", util.Map(clusterSpecs, func(clusperSpec *simulator.ClusterSpec) string { return clusperSpec.Name })) - ctx.Infof("WorkloadSpecs: %v", util.Map(workloadSpecs, func(workloadSpec *simulator.WorkloadSpec) string { return workloadSpec.Name })) + ctx.Infof("ClusterSpecs: %v", slices.Map(clusterSpecs, func(clusperSpec *simulator.ClusterSpec) string { return clusperSpec.Name })) + ctx.Infof("WorkloadSpecs: %v", slices.Map(workloadSpecs, func(workloadSpec *simulator.WorkloadSpec) string { return workloadSpec.Name })) ctx.Infof("SchedulingConfigs: %v", maps.Keys(schedulingConfigsByFilePath)) var fileWriter *simulator.Writer @@ -108,7 +125,7 @@ func runSimulations(cmd *cobra.Command, args []string) error { for _, clusterSpec := range clusterSpecs { for _, workloadSpec := range workloadSpecs { for schedulingConfigPath, schedulingConfig := range schedulingConfigsByFilePath { - if s, err := simulator.NewSimulator(clusterSpec, workloadSpec, schedulingConfig); err != nil { + if s, err := simulator.NewSimulator(clusterSpec, workloadSpec, schedulingConfig, enableFastForward, hardTerminationMinutes, schedulerCyclePeriodSeconds); err != nil { return err } else { if !showSchedulerLogs { diff --git a/config/armada/config.yaml b/config/armada/config.yaml index 2ec398c0ad7..cb7e1b4df2a 100644 --- a/config/armada/config.yaml +++ b/config/armada/config.yaml @@ -6,7 +6,7 @@ corsAllowedOrigins: - http://localhost:8089 - http://localhost:10000 grpcGatewayPath: "/" -cancelJobsBatchSize: 1000 +queueCacheRefreshPeriod: 10s schedulerApiConnection: armadaUrl: "localhost:50052" grpc: @@ -19,45 +19,12 @@ grpc: permitWithoutStream: true tls: enabled: false -redis: - addrs: - - redis:6379 - password: "" - db: 0 - poolSize: 1000 eventsApiRedis: addrs: - redis:6379 password: "" db: 1 poolSize: 1000 -# Scheduling config used by the job submitChecker. Only needs the following subset of config options. -# You may also wish to configure indexedNodeLabels and indexedTaints to speed up checking. -# -# This config must be consistent with the scheduling config used by the scheduler. -# You may want to insert the scheduling config used for the scheduler automatically, e.g., using PyYAML, to guarantee consistency. 
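The removed comment above suggests keeping the submit-checker's scheduling section consistent with the scheduler by generating it programmatically, e.g. with PyYAML. A minimal sketch of that idea, assuming both files carry a top-level `scheduling` key and using the config paths that appear elsewhere in this change (the paths and key layout are illustrative, not part of the patch):

```python
import yaml

# Read the scheduler's scheduling config (assumed path).
with open("config/scheduler/config.yaml") as f:
    scheduler_cfg = yaml.safe_load(f)

# Read the server config and overwrite its scheduling section so the
# submit checker sees exactly the same settings as the scheduler.
with open("config/armada/config.yaml") as f:
    server_cfg = yaml.safe_load(f)
server_cfg["scheduling"] = scheduler_cfg["scheduling"]

with open("config/armada/config.yaml", "w") as f:
    yaml.safe_dump(server_cfg, f, sort_keys=False)
```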
-scheduling: - executorTimeout: "60m" - executorUpdateFrequency: "1m" - priorityClasses: - armada-default: - priority: 1000 - preemptible: false - maximumResourceFractionPerQueue: - memory: 1.0 - cpu: 1.0 - armada-preemptible: - priority: 1000 - preemptible: true - indexedResources: - - name: "nvidia.com/gpu" - resolution: "1" - - name: "cpu" - resolution: "100m" - - name: "memory" - resolution: "100Mi" - - name: "ephemeral-storage" - resolution: "1Gi" submission: allowedPriorityClassNames: armada-default: true @@ -116,17 +83,7 @@ postgres: port: 5432 user: postgres password: psw - dbname: postgres + dbname: lookout sslmode: disable queryapi: - enabled: false maxQueryItems: 500 - postgres: - connection: - host: postgres - port: 5432 - user: postgres - password: psw - dbname: postgres - sslmode: disable - diff --git a/config/executor/config.yaml b/config/executor/config.yaml index 087867a9cfe..dd20e4d04bb 100644 --- a/config/executor/config.yaml +++ b/config/executor/config.yaml @@ -34,6 +34,7 @@ kubernetes: QPS: 10000 Burst: 10000 nodeIdLabel: kubernetes.io/hostname + nodeTypeLabel: armadaproject.io/node-type minimumPodAge: 3m failedPodExpiry: 10m maxTerminatedPods: 1000 # Should be lower than kube-controller-managed terminated-pod-gc-threshold (default 12500) diff --git a/config/lookoutingesterv2/config.yaml b/config/lookoutingesterv2/config.yaml index 386254e5949..a1b54ba35ad 100644 --- a/config/lookoutingesterv2/config.yaml +++ b/config/lookoutingesterv2/config.yaml @@ -4,7 +4,7 @@ postgres: port: 5433 user: postgres password: psw - dbname: postgres + dbname: lookout sslmode: disable metricsPort: 9002 pulsar: @@ -19,4 +19,3 @@ batchDuration: 500ms minJobSpecCompressionSize: 1024 userAnnotationPrefix: "armadaproject.io/" maxBackoff: 60 -useLegacyEventConversion: true diff --git a/config/lookoutv2/config.yaml b/config/lookoutv2/config.yaml index 583f09b88af..066fcf74de6 100644 --- a/config/lookoutv2/config.yaml +++ b/config/lookoutv2/config.yaml @@ -13,13 +13,15 @@ postgres: port: 5433 user: postgres password: psw - dbname: postgres + dbname: lookout sslmode: disable prunerConfig: - expireAfter: 1008h # 42 days, 6 weeks + expireAfter: 1008h # 42 days / 6 weeks + deduplicationExpireAfter: 168h # 7 days timeout: 1h batchSize: 1000 uiConfig: + backend: "jsonb" armadaApiBaseUrl: "http://armada-server:8080" userAnnotationPrefix: "armadaproject.io/" binocularsBaseUrlPattern: "http://armada-binoculars:8080" diff --git a/config/queryapi/config.yaml b/config/queryapi/config.yaml deleted file mode 100644 index 366549097b3..00000000000 --- a/config/queryapi/config.yaml +++ /dev/null @@ -1,21 +0,0 @@ -http: - port: 8080 -grpc: - port: 50052 - keepaliveParams: - maxConnectionIdle: 5m - time: 120s - timeout: 20s - keepaliveEnforcementPolicy: - minTime: 10s - permitWithoutStream: true - tls: - enabled: false -postgres: - connection: - host: postgres - port: 5432 - user: postgres - password: psw - dbname: postgres - sslmode: disable diff --git a/config/scheduler/config.yaml b/config/scheduler/config.yaml index de1b8979eda..07c432e1218 100644 --- a/config/scheduler/config.yaml +++ b/config/scheduler/config.yaml @@ -5,6 +5,8 @@ executorTimeout: 1h databaseFetchSize: 1000 pulsarSendTimeout: 5s internedStringsCacheSize: 100000 +queueRefreshPeriod: 10s +disableSubmitCheck: false metrics: port: 9000 refreshInterval: 30s @@ -36,19 +38,16 @@ pulsar: compressionType: zlib compressionLevel: faster maxAllowedMessageSize: 4194304 #4Mi -redis: - addrs: - - redis:6379 - password: "" - db: 0 - poolSize: 1000 
+armadaApi: + armadaUrl: "server:50051" + forceNoTls: true postgres: connection: host: postgres port: 5432 user: postgres password: psw - dbname: postgres + dbname: scheduler sslmode: disable leader: mode: standalone @@ -75,10 +74,17 @@ grpc: enabled: false # You may want to configure indexedNodeLabels and indexedTaints to speed up scheduling. scheduling: + supportedResourceTypes: + - name: memory + resolution: "1" + - name: cpu + resolution: "1m" + - name: ephemeral-storage + resolution: "1" + - name: nvidia.com/gpu + resolution: "1" disableScheduling: false enableAssertions: false - nodeEvictionProbability: 1.0 - nodeOversubscriptionEvictionProbability: 1.0 protectedFractionOfFairShare: 1.0 nodeIdLabel: "kubernetes.io/hostname" priorityClasses: @@ -120,7 +126,7 @@ scheduling: executorTimeout: "10m" maxUnacknowledgedJobsPerExecutor: 2500 alwaysAttemptScheduling: false - executorUpdateFrequency: "1m" + executorUpdateFrequency: "60s" failureProbabilityEstimation: # Optimised default parameters. numInnerIterations: 10 diff --git a/config/scheduleringester/config.yaml b/config/scheduleringester/config.yaml index 0056896012a..717bdbbf4c9 100644 --- a/config/scheduleringester/config.yaml +++ b/config/scheduleringester/config.yaml @@ -4,7 +4,7 @@ postgres: port: 5432 user: postgres password: psw - dbname: postgres + dbname: scheduler sslmode: disable metrics: port: 9003 diff --git a/developer/dependencies/postgres-init.sh b/developer/dependencies/postgres-init.sh index 6ecada98426..92e5589ee8f 100755 --- a/developer/dependencies/postgres-init.sh +++ b/developer/dependencies/postgres-init.sh @@ -3,8 +3,10 @@ set -e psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL CREATE USER docker; - CREATE DATABASE postgresv2; - GRANT ALL PRIVILEGES ON DATABASE postgresv2 TO docker; + CREATE DATABASE lookout; + GRANT ALL PRIVILEGES ON DATABASE lookout TO docker; CREATE DATABASE jobservice; GRANT ALL PRIVILEGES ON DATABASE jobservice TO docker; + CREATE DATABASE scheduler; + GRANT ALL PRIVILEGES ON DATABASE scheduler TO docker; EOSQL diff --git a/developer/env/docker/lookoutv2.env b/developer/env/docker/lookoutv2.env index 9e929e7c1d6..1c0d8110f29 100644 --- a/developer/env/docker/lookoutv2.env +++ b/developer/env/docker/lookoutv2.env @@ -1,4 +1,3 @@ -ARMADA_POSTGRES_CONNECTION_DBNAME=postgresv2 ARMADA_POSTGRES_CONNECTION_PORT=5432 ARMADA_CORSALLOWEDORIGINS="http://localhost:3000,http://localhost:10000,http://localhost:8082,http://example.com:10000" ARMADA_UICONFIG_ARMADAAPIBASEURL="http://localhost:8080" diff --git a/developer/env/docker/scheduler.env b/developer/env/docker/scheduler.env index fd6827a022f..40a2b71fb69 100644 --- a/developer/env/docker/scheduler.env +++ b/developer/env/docker/scheduler.env @@ -1 +1,3 @@ ARMADA_HTTP_PORT=8081 +ARMADA_QUEUEREFRESHPERIOD=1s +ARMADA_SCHEDULING_EXECUTORUPDATEFREQUENCY=1s diff --git a/developer/env/docker/server.env b/developer/env/docker/server.env index 14dd9e17473..6b52f9b6342 100644 --- a/developer/env/docker/server.env +++ b/developer/env/docker/server.env @@ -1,3 +1,3 @@ -EXECUTOR_UPDATE_INTERVAL="1s" +ARMADA_QUEUECACHEREFRESHPERIOD="1s" ARMADA_CORSALLOWEDORIGINS="http://localhost:3000,http://localhost:10000,http://example.com:10000" diff --git a/developer/env/local/scheduler.env b/developer/env/local/scheduler.env index e564f45a9db..9bd11e5f31f 100644 --- a/developer/env/local/scheduler.env +++ b/developer/env/local/scheduler.env @@ -1,5 +1,4 @@ ARMADA_HTTP_PORT:8081 -ARMADA_REDIS_ADDRS=localhost:6379 
ARMADA_POSTGRES_CONNECTION_HOST=localhost ARMADA_PULSAR_URL=pulsar://localhost:6650 ARMADA_METRICS_PORT=9004 diff --git a/developer/env/local/server.env b/developer/env/local/server.env index a049a1728d8..1daa14496d8 100644 --- a/developer/env/local/server.env +++ b/developer/env/local/server.env @@ -1,6 +1,5 @@ EXECUTOR_UPDATE_INTERVAL="1s" ARMADA_CORSALLOWEDORIGINS=="http://localhost:3000,http://localhost:8089,http://localhost:10000,http://example.com:10000,http://example.com:8089" -ARMADA_REDIS_ADDRS=localhost:6379 ARMADA_EVENTSAPIREDIS_ADDRS=localhost:6379 ARMADA_POSTGRES_CONNECTION_HOST=localhost ARMADA_PULSAR_URL=pulsar://localhost:6650 diff --git a/docker-compose.yaml b/docker-compose.yaml index cd43d7dcc70..4e96216ef27 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,5 +1,3 @@ -version: "3.8" - networks: kind: external: true @@ -34,7 +32,7 @@ services: - kind pulsar: - image: ${PULSAR_IMAGE:-apachepulsar/pulsar:2.10.4} + image: ${PULSAR_IMAGE:-apachepulsar/pulsar:3.0.2} container_name: pulsar volumes: - ./developer/dependencies/pulsar.conf:/conf/pulsar.conf @@ -61,6 +59,7 @@ services: - "go-cache:/root/.cache/go-build:rw" - "gomod-cache:/go/pkg/mod:rw" depends_on: + - lookoutv2-migration - eventingester working_dir: /app env_file: diff --git a/docs/demo.md b/docs/demo.md index bcd3f6a7008..7b042944695 100644 --- a/docs/demo.md +++ b/docs/demo.md @@ -12,7 +12,7 @@ The Armada UI (lookout) can be found at this URL: ## Local prerequisites - Git -- Go 1.20 +- Go 1.21 ## Obtain the armada source Clone [this](https://github.com/armadaproject/armada) repository: @@ -79,8 +79,8 @@ Create queues, submit some jobs, and monitor progress: ### Queue Creation Use a unique name for the queue. Make sure you remember it for the next steps. ```bash -armadactl create queue $QUEUE_NAME --priorityFactor 1 -armadactl create queue $QUEUE_NAME --priorityFactor 2 +armadactl create queue $QUEUE_NAME --priority-factor 1 +armadactl create queue $QUEUE_NAME --priority-factor 2 ``` For queues created in this way, user and group owners of the queue have permissions to: diff --git a/docs/developer.md b/docs/developer.md index 1b4fb4655f8..7c3a721bb85 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -51,7 +51,7 @@ Please see these documents for more information about Armadas Design: ## Pre-requisites -- [Go](https://go.dev/doc/install) (version 1.20 or later) +- [Go](https://go.dev/doc/install) (version 1.21 or later) - gcc (for Windows, see, e.g., [tdm-gcc](https://jmeubank.github.io/tdm-gcc/)) - [mage](https://magefile.org/) - [docker](https://docs.docker.com/get-docker/) diff --git a/docs/developer/aws-ec2.md b/docs/developer/aws-ec2.md index b3909aa4e36..66ffdbe468e 100644 --- a/docs/developer/aws-ec2.md +++ b/docs/developer/aws-ec2.md @@ -97,7 +97,7 @@ ssh into your EC2 instance, become root and download the go package from [golang 1. Extract the archive you downloaded into /usr/local, creating a Go tree in /usr/local/go with the following command: ``` -rm -rf /usr/local/go && tar -C /usr/local -xzf go1.20.1.linux-amd64.tar.gz +rm -rf /usr/local/go && tar -C /usr/local -xzf go1.21.1.linux-amd64.tar.gz ``` 2. 
Configure .bashrc @@ -114,7 +114,7 @@ Add the following lines to your ~/.bashrc file as well, also create a golang fol ``` # Go envs -export GOVERSION=go1.20.1 +export GOVERSION=go1.21.1 export GO_INSTALL_DIR=/usr/local/go export GOROOT=$GO_INSTALL_DIR export GOPATH=/home/ec2-user/golang @@ -129,7 +129,7 @@ Verify that you’ve installed Go by opening a command prompt and typing the fol ``` go version -go version go1.20.1 linux/amd64 +go version go1.21.1 linux/amd64 ``` - ### Install [Kind](https://dev.to/rajitpaul_savesoil/setup-kind-kubernetes-in-docker-on-linux-3kbd) diff --git a/docs/developer/manual-localdev.md b/docs/developer/manual-localdev.md index af8c6d65537..24321f740d6 100644 --- a/docs/developer/manual-localdev.md +++ b/docs/developer/manual-localdev.md @@ -2,7 +2,7 @@ Here, we give an overview of a development setup for Armada that gives users full control over the Armada components and dependencies. -Before starting, please ensure you have installed [Go](https://go.dev/doc/install) (version 1.20 or later), gcc (for Windows, see, e.g., [tdm-gcc](https://jmeubank.github.io/tdm-gcc/)), [mage](https://magefile.org/), [docker](https://docs.docker.com/get-docker/), [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl), and, if you need to compile `.proto` files, [protoc](https://github.com/protocolbuffers/protobuf/releases). +Before starting, please ensure you have installed [Go](https://go.dev/doc/install) (version 1.21 or later), gcc (for Windows, see, e.g., [tdm-gcc](https://jmeubank.github.io/tdm-gcc/)), [mage](https://magefile.org/), [docker](https://docs.docker.com/get-docker/), [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl), and, if you need to compile `.proto` files, [protoc](https://github.com/protocolbuffers/protobuf/releases). For a full list of mage commands, run `mage -l`. diff --git a/docs/developer/ubuntu-setup.md b/docs/developer/ubuntu-setup.md index f5307816e26..db3733c6e50 100644 --- a/docs/developer/ubuntu-setup.md +++ b/docs/developer/ubuntu-setup.md @@ -75,12 +75,12 @@ $ sudo apt install gcc make unzip ``` ### Install Go, Protobuffers, and kubectl tools -Install the Go compiler and associated tools. Currently, the latest version is 1.20.5, but there may +Install the Go compiler and associated tools. Currently, the latest version is 1.21.1, but there may be newer versions: ``` -$ curl --location -O https://go.dev/dl/go1.20.5.linux-amd64.tar.gz -$ sudo tar -C /usr/local -xzvf go1.20.5.linux-amd64.tar.gl +$ curl --location -O https://go.dev/dl/go1.21.1.linux-amd64.tar.gz +$ sudo tar -C /usr/local -xzvf go1.21.1.linux-amd64.tar.gl $ echo 'export PATH=$PATH:/usr/local/go/bin' > go.sh $ sudo cp go.sh /etc/profile.d/ ``` diff --git a/docs/python_armada_client.md b/docs/python_armada_client.md index d8ca69a72fe..e2dc1228f89 100644 --- a/docs/python_armada_client.md +++ b/docs/python_armada_client.md @@ -15,17 +15,21 @@ For the api definitions: [https://armadaproject.io/api](https://armadaproject.io/api) -### _class_ armada_client.client.ArmadaClient(channel) +### _class_ armada_client.client.ArmadaClient(channel, event_timeout=datetime.timedelta(seconds=900)) Client for accessing Armada over gRPC. * **Parameters** - **channel** – gRPC channel used for authentication. See + + * **channel** – gRPC channel used for authentication. See [https://grpc.github.io/grpc/python/grpc.html](https://grpc.github.io/grpc/python/grpc.html) for more information. 
+ * **event_timeout** (*datetime.timedelta*) – + + * **Returns** @@ -33,23 +37,22 @@ Client for accessing Armada over gRPC. -#### cancel_jobs(queue=None, job_id=None, job_set_id=None) +#### cancel_jobs(queue, job_set_id, job_id=None) Cancel jobs in a given queue. -Uses the CancelJobs RPC to cancel jobs. Either job_id or -job_set_id is required. +Uses the CancelJobs RPC to cancel jobs. * **Parameters** - * **queue** (*str** | **None*) – The name of the queue + * **queue** (*str*) – The name of the queue - * **job_id** (*str** | **None*) – The name of the job id (this or job_set_id required) + * **job_set_id** (*str*) – The name of the job set id - * **job_set_id** (*str** | **None*) – An array of JobSubmitRequestItems. (this or job_id required) + * **job_id** (*str** | **None*) – The name of the job id (optional), if empty - cancel all jobs @@ -252,6 +255,28 @@ Health check for Event Service. +#### get_job_details(job_ids) +Retrieves the details of a job from Armada. + + +* **Parameters** + + **job_ids** (*List**[**str**]*) – A list of unique job identifiers. + + + +* **Returns** + + The Armada job details response. + + + +* **Return type** + + armada.job_pb2.JobDetailsResponse + + + #### get_job_events_stream(queue, job_set_id, from_message_id=None) Get event stream for a job set. @@ -293,6 +318,50 @@ for event in events: +#### get_job_run_details(run_ids) +Retrieves the details of a job run from Armada. + + +* **Parameters** + + **run_ids** (*List**[**str**]*) – A list of unique job run identifiers. + + + +* **Returns** + + The Armada run details response. + + + +* **Return type** + + armada.job_pb2.JobRunDetailsResponse + + + +#### get_job_status(job_ids) +Retrieves the status of a list of jobs from Armada. + + +* **Parameters** + + **job_ids** (*List**[**str**]*) – A list of unique job identifiers. + + + +* **Returns** + + The response from the server containing the job status. + + + +* **Return type** + + JobStatusResponse + + + #### get_queue(name) Get the queue by name. @@ -317,11 +386,42 @@ Uses the GetQueue RPC to get the queue. -#### reprioritize_jobs(new_priority, job_ids=None, job_set_id=None, queue=None) +#### preempt_jobs(queue, job_set_id, job_id) +Preempt jobs in a given queue. + +Uses the PreemptJobs RPC to preempt jobs. + + +* **Parameters** + + + * **queue** (*str*) – The name of the queue + + + * **job_set_id** (*str*) – The name of the job set id + + + * **job_id** (*str*) – The id the job + + + +* **Returns** + + An empty response. + + + +* **Return type** + + google.protobuf.empty_pb2.Empty + + + +#### reprioritize_jobs(new_priority, job_ids, job_set_id, queue) Reprioritize jobs with new_priority value. Uses ReprioritizeJobs RPC to set a new priority on a list of jobs -or job set. +or job set (if job_ids are set to None or empty). * **Parameters** @@ -333,10 +433,10 @@ or job set. * **job_ids** (*List**[**str**] **| **None*) – A list of job ids to change priority of - * **job_set_id** (*str** | **None*) – A job set id including jobs to change priority of + * **job_set_id** (*str*) – A job set id including jobs to change priority of - * **queue** (*str** | **None*) – The queue the jobs are in + * **queue** (*str*) – The queue the jobs are in @@ -364,7 +464,7 @@ Health check for Submit Service. #### submit_jobs(queue, job_set_id, job_request_items) -Submit a armada job. +Submit an armada job. Uses SubmitJobs RPC to submit a job. 
diff --git a/docs/quickstart/index.md b/docs/quickstart/index.md index e1c71ff98d3..1b77479b092 100644 --- a/docs/quickstart/index.md +++ b/docs/quickstart/index.md @@ -45,8 +45,8 @@ Create queues, submit some jobs and monitor progress: ### Queue Creation ```bash -./armadactl create queue queue-a --priorityFactor 1 -./armadactl create queue queue-b --priorityFactor 2 +./armadactl create queue queue-a --priority-factor 1 +./armadactl create queue queue-b --priority-factor 2 ``` For queues created in this way, user and group owners of the queue have permissions to: - submit jobs diff --git a/docs/quickstart/job-queue-a.yaml b/docs/quickstart/job-queue-a.yaml index 5338435668c..2ce3e10b53d 100644 --- a/docs/quickstart/job-queue-a.yaml +++ b/docs/quickstart/job-queue-a.yaml @@ -1,7 +1,8 @@ queue: queue-a jobSetId: job-set-1 jobs: - - priority: 0 + - namespace: default + priority: 0 podSpec: terminationGracePeriodSeconds: 0 restartPolicy: Never diff --git a/docs/quickstart/job-queue-b.yaml b/docs/quickstart/job-queue-b.yaml index c888ef531e0..a6de6e50d7c 100644 --- a/docs/quickstart/job-queue-b.yaml +++ b/docs/quickstart/job-queue-b.yaml @@ -1,7 +1,8 @@ queue: queue-b jobSetId: job-set-1 jobs: - - priority: 0 + - namespace: default + priority: 0 podSpec: terminationGracePeriodSeconds: 0 restartPolicy: Never diff --git a/e2e/armadactl_test/armadactl_test.go b/e2e/armadactl_test/armadactl_test.go index cb7ac1e5f42..f5ee2fd956d 100644 --- a/e2e/armadactl_test/armadactl_test.go +++ b/e2e/armadactl_test/armadactl_test.go @@ -8,7 +8,6 @@ import ( "strings" "testing" - "github.com/avast/retry-go" "github.com/google/uuid" "github.com/stretchr/testify/require" @@ -227,37 +226,6 @@ jobs: require.True(t, strings.Contains(out, s), "expected output to contain '%s', but got '%s'", s, out) } - // analyze - err = retry.Do( - func() error { - err = app.Analyze(name, "set1") - if err != nil { - return fmt.Errorf("expected no error, but got %s", err) - } - - out = buf.String() - buf.Reset() - - if strings.Contains(out, "Found no events associated") { - return fmt.Errorf("no events found, got response %s", out) - } - - for _, s := range []string{fmt.Sprintf("Querying queue %s for job set set1", name), "api.JobSubmittedEvent", "api.JobQueuedEvent"} { - if !strings.Contains(out, s) { - return fmt.Errorf("expected output to contain '%s', but got '%s'", s, out) - } - } - - return nil - }, - retry.Attempts(100), // default retry delay is 100ms and it may take 10 seconds for the server to commit a job - ) - require.NoError(t, err, "error on calling analyze") - // resources - // no need for retry since we can be sure the job has been committed to the db at this point - err = app.Resources(name, "set1") - require.NoError(t, err) - out = buf.String() buf.Reset() for _, s := range []string{"Job ID:", "maximum used resources:", "\n"} { @@ -265,7 +233,7 @@ jobs: } // reprioritize - err = app.Reprioritize("", name, "set1", 2) + err = app.ReprioritizeJobSet(name, "set1", 2) require.NoError(t, err) out = buf.String() @@ -275,7 +243,7 @@ jobs: } // cancel - err = app.Cancel(name, "set1", "") + err = app.CancelJob(name, "set1", "") require.NoError(t, err) out = buf.String() diff --git a/e2e/pulsar_test/pulsar_test.go b/e2e/pulsar_test/pulsar_test.go deleted file mode 100644 index 7d526026108..00000000000 --- a/e2e/pulsar_test/pulsar_test.go +++ /dev/null @@ -1,1332 +0,0 @@ -package pulsar_test - -import ( - "context" - "fmt" - "io" - "net/http" - "os" - "os/exec" - "strings" - "testing" - "time" - - 
"github.com/apache/pulsar-client-go/pulsar" - pulsarlog "github.com/apache/pulsar-client-go/pulsar/log" - "github.com/gogo/protobuf/proto" - "github.com/google/go-cmp/cmp" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - - "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/armadaevents" - "github.com/armadaproject/armada/pkg/client" -) - -// Pulsar configuration. Must be manually reconciled with changes to the test setup or Armada. -const ( - pulsarUrl = "pulsar://localhost:6650" - pulsarTopic = "events" - pulsarSubscription = "e2e-test" - armadaUrl = "localhost:50051" - armadaQueueName = "e2e-test-queue" - armadaUserId = "anonymous" - defaultPulsarTimeout = 60 * time.Second -) - -// We setup kind to expose ingresses on this ULR. -const ingressUrl = "http://localhost:5001" - -// Armada exposes all ingresses on this path. -// Routing to the correct service is done using the hostname header. -const ingressPath = "/" - -// Namespace created by the test setup. Used when submitting test jobs. -const userNamespace = "personal-anonymous" - -// The submit server should automatically add default tolerations. -// These must be manually updated to match the default tolerations in the server config. -var expectedTolerations = []v1.Toleration{ - { - Key: "example.com/default_toleration", - Operator: v1.TolerationOpEqual, - Value: "true", - Effect: v1.TaintEffectNoSchedule, - TolerationSeconds: nil, - }, -} - -// Test publishing and receiving a message to/from Pulsar. -func TestPublishReceive(t *testing.T) { - err := withSetup(func(ctx context.Context, _ api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - _, err := producer.Send(context.Background(), &pulsar.ProducerMessage{ - Payload: []byte("hello"), - }) - if err != nil { - return err - } - - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - msg, err := consumer.Receive(ctxWithTimeout) - if err != nil { - return err - } - assert.Equal(t, "hello", string(msg.Payload())) - - return nil - }) - assert.NoError(t, err) -} - -// Test that submitting many jobs results in the correct sequence of Pulsar message being produced for each job. 
-func TestSubmitJobs(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - numJobs := 2 - req := createJobSubmitRequest(numJobs) - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - numEventsExpected := numJobs * 6 - sequences, err := receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence := flattenSequences(sequences) - if ok := assert.NotNil(t, sequence); !ok { - return nil - } - - for i, resi := range res.JobResponseItems { - reqi := req.JobRequestItems[i] - - jobId, err := armadaevents.ProtoUuidFromUlidString(resi.JobId) - if err != nil { - return err - } - - expected := armadaevents.ExpectedSequenceFromRequestItem(armadaQueueName, armadaUserId, userNamespace, req.JobSetId, jobId, reqi) - actual, err := filterSequenceByJobId(sequence, jobId) - if err != nil { - return err - } - if !isSequencef(t, expected, actual, "Event sequence error; printing diff:\n%s", cmp.Diff(expected, actual)) { - t.FailNow() - } - } - - return nil - }) - assert.NoError(t, err) -} - -// TODO: Make testsuite test. Or unit test. -func TestDedup(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - numJobs := 2 - clientId := uuid.New().String() - originalJobIds := make([]string, numJobs) - - // The first time, all jobs should be submitted as-is. - req := createJobSubmitRequestWithClientId(numJobs, clientId) - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - for i := 0; i < numJobs; i++ { - originalJobIds[i] = res.JobResponseItems[i].GetJobId() - } - - numEventsExpected := numJobs * 6 - _, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - // The second time, job ids should be replaced with the original ids. - req = createJobSubmitRequestWithClientId(numJobs, clientId) - ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - res, err = client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - for i := 0; i < numJobs; i++ { - assert.Equal(t, originalJobIds[i], res.JobResponseItems[i].GetJobId()) - } - - numEventsExpected = numJobs // one duplicate detected message per job - _, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - // Here, some ids should be replaced and some should be new. - req = createJobSubmitRequestWithClientId(numJobs, clientId) - req2 := createJobSubmitRequestWithClientId(numJobs, uuid.New().String()) - req.JobRequestItems = append(req.JobRequestItems, req2.JobRequestItems...) 
- ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - res, err = client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, 2*numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - for i := 0; i < numJobs; i++ { - assert.Equal(t, originalJobIds[i], res.JobResponseItems[i].GetJobId()) - } - - numEventsExpected = numJobs*6 + numJobs - _, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - return nil - }) - assert.NoError(t, err) -} - -// Test submitting several jobs, cancelling all of them, and checking that at least 1 is cancelled. -func TestSubmitCancelJobs(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - // The ingress job runs until canceled. - req := createJobSubmitRequestWithIngress() - numJobs := len(req.JobRequestItems) - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - // Wait for the jobs to be running (submitted, leased, assigned, running, ingress info). - numEventsExpected := numJobs * 5 - sequences, err := receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence := flattenSequences(sequences) - if ok := assert.NotNil(t, sequence); !ok { - return nil - } - - // Cancel the jobs - for _, r := range res.JobResponseItems { - ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - res, err := client.CancelJobs(ctxWithTimeout, &api.JobCancelRequest{ - JobId: r.JobId, - // Leave JobSetId and Queue empty to check that these are auto-populated. - }) - if !assert.NoError(t, err) { - return nil - } - assert.Equal(t, []string{r.JobId}, res.CancelledIds) - } - - // Check that the job is cancelled (cancel, cancelled). - numEventsExpected = numJobs * 2 - sequences, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence = flattenSequences(sequences) - if ok := assert.NotNil(t, sequence); !ok { - return nil - } - - for _, resi := range res.JobResponseItems { - jobId, err := armadaevents.ProtoUuidFromUlidString(resi.JobId) - if err != nil { - return err - } - - actual, err := filterSequenceByJobId(sequence, jobId) - if err != nil { - return err - } - - expected := &armadaevents.EventSequence{ - Queue: armadaQueueName, - JobSetName: req.JobSetId, - UserId: armadaUserId, - Events: []*armadaevents.EventSequence_Event{ - {Event: &armadaevents.EventSequence_Event_CancelJob{}}, - {Event: &armadaevents.EventSequence_Event_CancelledJob{}}, - }, - } - if ok := isSequenceTypef(t, expected, actual, "Event sequence error; printing diff:\n%s", cmp.Diff(expected, actual)); !ok { - return nil - } - } - - return nil - }) - assert.NoError(t, err) -} - -// Test cancelling a job set. 
-func TestSubmitCancelJobSet(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - // Submit a few jobs that fail after a few seconds - numJobs := 2 - req := createJobSubmitRequestWithError(numJobs) - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - // Workaround to let jobs get through pulsar into redis before cancelling - otherwise cancel does nothing - time.Sleep(time.Second * 2) - - ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - _, err = client.CancelJobs(ctxWithTimeout, &api.JobCancelRequest{ - JobSetId: req.JobSetId, - Queue: req.Queue, - }) - if !assert.NoError(t, err) { - return nil - } - eventFilter := func(e *armadaevents.EventSequence_Event) bool { - switch e.GetEvent().(type) { - case *armadaevents.EventSequence_Event_SubmitJob, - *armadaevents.EventSequence_Event_CancelJob, - *armadaevents.EventSequence_Event_CancelledJob: - return true - } - return false - } - - // Test that we get submit, cancel job set, and cancelled messages. - numEventsExpected := numJobs + numJobs + numJobs - sequences, err := receiveJobSetSequencesWithEventFilter(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout, eventFilter) - if err != nil { - return err - } - - actual := flattenSequences(sequences) - if ok := assert.NotNil(t, actual); !ok { - return nil - } - - expected := &armadaevents.EventSequence{ - Queue: armadaQueueName, - JobSetName: req.JobSetId, - UserId: armadaUserId, - Events: []*armadaevents.EventSequence_Event{}, - } - for range res.JobResponseItems { - expected.Events = append( - expected.Events, - &armadaevents.EventSequence_Event{ - Event: &armadaevents.EventSequence_Event_SubmitJob{}, - }, - ) - } - for range res.JobResponseItems { - expected.Events = append( - expected.Events, - &armadaevents.EventSequence_Event{ - Event: &armadaevents.EventSequence_Event_CancelJob{}, - }, - ) - } - for range res.JobResponseItems { - expected.Events = append( - expected.Events, - &armadaevents.EventSequence_Event{ - Event: &armadaevents.EventSequence_Event_CancelledJob{}, - }, - ) - } - if ok := isSequenceTypef(t, expected, actual, "Event sequence error; printing diff:\n%s", cmp.Diff(expected, actual)); !ok { - return nil - } - - return nil - }) - assert.NoError(t, err) -} - -func TestIngress(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - req := createJobSubmitRequestWithIngress() - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, 1, len(res.JobResponseItems)); !ok { - return nil - } - - numEventsExpected := 5 - sequences, err := receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence := flattenSequences(sequences) - if !assert.NotNil(t, sequence) { - return nil - } - - if !assert.Equal(t, numEventsExpected, len(sequence.Events)) { - t.FailNow() - } - - // Armada generated a special event with info on how to connect to the ingress. 
- ingressInfoEvent, ok := sequence.Events[numEventsExpected-1].GetEvent().(*armadaevents.EventSequence_Event_StandaloneIngressInfo) - if !assert.True(t, ok) { - t.FailNow() - } - ingressInfo := ingressInfoEvent.StandaloneIngressInfo - - actualJobId, err := armadaevents.UlidStringFromProtoUuid(ingressInfo.GetJobId()) - if err != nil { - return err - } - assert.Equal(t, res.JobResponseItems[0].JobId, actualJobId) - - // Hostname used to route requests to the service setup for the created pod. - containerPort := int32(80) - host, ok := ingressInfo.IngressAddresses[containerPort] - if !assert.True(t, ok) { - t.FailNow() - } - - // It takes a few seconds for the ingress to become active. - // Ideally, we would make repeated requests up to some max timeout instead of using a constant 10s. - time.Sleep(10 * time.Second) - - // Make a get request to this hostname to verify that we get a response from the pod. - httpClient := &http.Client{} - httpReq, err := http.NewRequest("GET", ingressUrl+ingressPath, nil) - if err != nil { - return err - } - httpReq.Host = host - httpRes, err := httpClient.Do(httpReq) - if err != nil { - return err - } - httpResBytes, err := io.ReadAll(httpRes.Body) - if err != nil { - return err - } - assert.Contains(t, string(httpResBytes), "If you see this page, the nginx web server is successfully installed") - - // Cancel the job to clean it up. - ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - _, err = client.CancelJobs(ctxWithTimeout, &api.JobCancelRequest{ - JobId: res.JobResponseItems[0].JobId, - JobSetId: req.JobSetId, - Queue: req.Queue, - }) - if !assert.NoError(t, err) { - return nil - } - - numEventsExpected = 3 - sequences, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence = flattenSequences(sequences) - if !assert.NotNil(t, sequence) { - return nil - } - - if !assert.Equal(t, numEventsExpected, len(sequence.Events)) { - t.FailNow() - } - - return nil - }) - assert.NoError(t, err) -} - -func TestService(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - // Create a job running an nginx server accessible via a headless service. - req := createJobSubmitRequestWithService() - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, 1, len(res.JobResponseItems)); !ok { - return nil - } - - numEventsExpected := 5 - sequences, err := receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence := flattenSequences(sequences) - if !assert.NotNil(t, sequence) { - return nil - } - - if !assert.Equal(t, numEventsExpected, len(sequence.Events)) { - t.FailNow() - } - - // It takes a few seconds for the service to become active. - // Ideally, we would make repeated requests up to some max timeout instead of using a constant 10s. - time.Sleep(10 * time.Second) - - // Get the ip of the nginx pod via the k8s api. - podIndex := 0 - endpointName := fmt.Sprintf("armada-%s-%d-headless", res.GetJobResponseItems()[0].JobId, podIndex) - out, err := exec.Command("kubectl", "get", "endpoints", endpointName, "--namespace", userNamespace, "-o", "jsonpath='{.subsets[0].addresses[0].ip}'"). 
- Output() - if !assert.NoError(t, err) { - t.FailNow() - } - address := strings.ReplaceAll(string(out), "'", "") + ":80" - - // Submit a new job that queries that ip using wget. - wgetReq := createWgetJobRequest(address) - ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - wgetRes, err := client.SubmitJobs(ctxWithTimeout, wgetReq) - if err != nil { - return err - } - - if ok := assert.Equal(t, 1, len(wgetRes.JobResponseItems)); !ok { - return nil - } - - // Check that the wget job completes successfully. - numEventsExpected = 5 - sequences, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, wgetReq.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence = flattenSequences(sequences) - if !assert.NotNil(t, sequence) { - return nil - } - - if !assert.Equal(t, numEventsExpected, len(sequence.Events)) { - t.FailNow() - } - - _, ok := sequence.Events[numEventsExpected-1].GetEvent().(*armadaevents.EventSequence_Event_JobSucceeded) - assert.True(t, ok) - - // Cancel the original job (i.e., the nginx job). - ctxWithTimeout, _ = context.WithTimeout(context.Background(), time.Second) - _, err = client.CancelJobs(ctxWithTimeout, &api.JobCancelRequest{ - JobId: res.JobResponseItems[0].JobId, - JobSetId: req.JobSetId, - Queue: req.Queue, - }) - if !assert.NoError(t, err) { - return nil - } - - numEventsExpected = 3 - sequences, err = receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence = flattenSequences(sequences) - if !assert.NotNil(t, sequence) { - return nil - } - - if !assert.Equal(t, numEventsExpected, len(sequence.Events)) { - t.FailNow() - } - - return nil - }) - assert.NoError(t, err) -} - -// Test that submitting many jobs results in the correct sequence of Pulsar message being produced for each job. -// For jobs that contain multiple PodSpecs, services, and ingresses. -func TestSubmitJobsWithEverything(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - numJobs := 1 - req := createJobSubmitRequestWithEverything(numJobs) - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - numEventsExpected := numJobs * 7 - sequences, err := receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence := flattenSequences(sequences) - if ok := assert.NotNil(t, sequence); !ok { - return nil - } - - for i, resi := range res.JobResponseItems { - reqi := req.JobRequestItems[i] - - jobId, err := armadaevents.ProtoUuidFromUlidString(resi.JobId) - if err != nil { - return err - } - - expected := armadaevents.ExpectedSequenceFromRequestItem(armadaQueueName, armadaUserId, userNamespace, req.JobSetId, jobId, reqi) - actual, err := filterSequenceByJobId(sequence, jobId) - if err != nil { - return err - } - - // Because the order of the ingress info messages varies (e.g., they may arrive after the job has completed), - // we filter those out and check them separately. 
- actual, standaloneIngressInfos, err := filterOutStandaloneIngressInfo(actual) - if err != nil { - return err - } - - if ok := isSequencef(t, expected, actual, "Event sequence error; printing diff:\n%s", cmp.Diff(expected, actual)); !ok { - return nil - } - - if !assert.Equal(t, 1, len(standaloneIngressInfos)) { - return nil - } - standaloneIngressInfo := standaloneIngressInfos[0] - fmt.Printf("standaloneIngressInfo:\n%+v\n", standaloneIngressInfo) - assert.Equal(t, standaloneIngressInfo.ObjectMeta.ExecutorId, "Cluster1") - assert.Equal(t, standaloneIngressInfo.ObjectMeta.Namespace, "personal-anonymous") - assert.Equal(t, standaloneIngressInfo.PodNamespace, "personal-anonymous") - _, ok := standaloneIngressInfo.IngressAddresses[5000] - assert.True(t, ok) - _, ok = standaloneIngressInfo.IngressAddresses[6000] - assert.True(t, ok) - } - - return nil - }) - assert.NoError(t, err) -} - -func TestSubmitJobWithError(t *testing.T) { - err := withSetup(func(ctx context.Context, client api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error { - // Submit a few jobs that fail after a few seconds - numJobs := 1 - req := createJobSubmitRequestWithError(numJobs) - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - res, err := client.SubmitJobs(ctxWithTimeout, req) - if err != nil { - return err - } - - if ok := assert.Equal(t, numJobs, len(res.JobResponseItems)); !ok { - return nil - } - - // Test that we get errors messages. - numEventsExpected := numJobs * 5 - sequences, err := receiveJobSetSequences(ctx, consumer, armadaQueueName, req.JobSetId, numEventsExpected, defaultPulsarTimeout) - if err != nil { - return err - } - - sequence := flattenSequences(sequences) - if ok := assert.NotNil(t, sequence); !ok { - return nil - } - - for _, resi := range res.JobResponseItems { - - jobId, err := armadaevents.ProtoUuidFromUlidString(resi.JobId) - if err != nil { - return err - } - - actual, err := filterSequenceByJobId(sequence, jobId) - if err != nil { - return err - } - if !assert.NotEmpty(t, actual.Events) { - return nil - } - expected := &armadaevents.EventSequence{ - Queue: req.Queue, - JobSetName: req.JobSetId, - UserId: armadaUserId, - Events: []*armadaevents.EventSequence_Event{ - { - Event: &armadaevents.EventSequence_Event_JobRunErrors{ - JobRunErrors: &armadaevents.JobRunErrors{ - JobId: jobId, - }, - }, - }, - { - Event: &armadaevents.EventSequence_Event_JobErrors{ - JobErrors: &armadaevents.JobErrors{ - JobId: jobId, - }, - }, - }, - }, - } - - // Only check the two final events. - actual.Events = actual.Events[len(actual.Events)-2:] - if !isSequencef(t, expected, actual, "Event sequence error; printing diff:\n%s", cmp.Diff(expected, actual)) { - return nil - } - } - - return nil - }) - assert.NoError(t, err) -} - -func isSequenceTypef(t *testing.T, expected *armadaevents.EventSequence, actual *armadaevents.EventSequence, msg string, args ...interface{}) (ok bool) { - defer func() { - if !ok && msg != "" { - t.Logf(msg, args...) - } - }() - if ok = assert.Equal(t, len(expected.Events), len(actual.Events)); !ok { - return false - } - for i, expectedEvent := range expected.Events { - actualEvent := actual.Events[i] - if ok = assert.IsType(t, expectedEvent.Event, actualEvent.Event); !ok { - return false - } - } - return true -} - -// Like isSequence, but logs msg if a comparison fails. 
-func isSequencef(t *testing.T, expected *armadaevents.EventSequence, actual *armadaevents.EventSequence, msg string, args ...interface{}) bool { - ok := true - defer func() { - if !ok && msg != "" { - t.Logf(msg, args...) - } - }() - ok = ok && assert.NotNil(t, expected) - ok = ok && assert.NotNil(t, actual) - ok = ok && assert.Equal(t, expected.Queue, actual.Queue) - ok = ok && assert.Equal(t, expected.JobSetName, actual.JobSetName) - ok = ok && assert.Equal(t, expected.UserId, actual.UserId) - ok = ok && assert.Equal(t, len(expected.Events), len(actual.Events)) - if len(expected.Events) == len(actual.Events) { - for i, expectedEvent := range expected.Events { - actualEvent := actual.Events[i] - ok = ok && isEventf(t, expectedEvent, actualEvent, "%d-th event differed: %s", i, actualEvent) - } - } - return ok -} - -// Compare an actual event with an expected event. -// Only compares the subset of fields relevant for testing. -func isEventf(t *testing.T, expected *armadaevents.EventSequence_Event, actual *armadaevents.EventSequence_Event, msg string, args ...interface{}) bool { - ok := true - defer func() { - if !ok && msg != "" { - t.Logf(msg, args...) - } - }() - if ok = ok && assert.IsType(t, expected.Event, actual.Event); !ok { - return ok - } - - // If the expected event includes a jobId, the actual event must include the same jobId. - expectedJobId, err := armadaevents.JobIdFromEvent(expected) - if err == nil { - actualJobId, err := armadaevents.JobIdFromEvent(actual) - if err == nil { // Ignore for events without a jobId (e.g., cancelJobSet). - if ok = ok && assert.Equal(t, expectedJobId, actualJobId); ok { - return ok - } - } - } - - switch expectedEvent := expected.Event.(type) { - case *armadaevents.EventSequence_Event_SubmitJob: - actualEvent, ok := actual.Event.(*armadaevents.EventSequence_Event_SubmitJob) - if ok = ok && assert.True(t, ok); !ok { - return ok - } - ok = ok && assert.Equal(t, *expectedEvent.SubmitJob.JobId, *actualEvent.SubmitJob.JobId) - ok = ok && assert.Equal(t, expectedEvent.SubmitJob.DeduplicationId, actualEvent.SubmitJob.DeduplicationId) - ok = ok && assert.Equal(t, expectedEvent.SubmitJob.Priority, actualEvent.SubmitJob.Priority) - ok = ok && assert.IsType(t, expectedEvent.SubmitJob.MainObject.Object, actualEvent.SubmitJob.MainObject.Object) - ok = ok && assert.NotNil(t, expectedEvent.SubmitJob.ObjectMeta) - ok = ok && assert.Equal(t, expectedEvent.SubmitJob.ObjectMeta.Namespace, actualEvent.SubmitJob.ObjectMeta.Namespace) - - expectedObjectCounts := countObjectTypes(expectedEvent.SubmitJob.Objects) - actualObjectCounts := countObjectTypes(actualEvent.SubmitJob.Objects) - ok = ok && assert.Equal(t, expectedObjectCounts, actualObjectCounts) - - ok = ok && assert.NotEmpty(t, actualEvent.SubmitJob.MainObject) - - // The main object must be a podspec. - mainPodSpec, isPodSpec := (actualEvent.SubmitJob.MainObject.Object).(*armadaevents.KubernetesMainObject_PodSpec) - ok = ok && assert.True(t, isPodSpec) - - // Collect all podspecs in the job. - podSpecs := make([]*v1.PodSpec, 0) - podSpecs = append(podSpecs, mainPodSpec.PodSpec.PodSpec) - for _, object := range actualEvent.SubmitJob.Objects { - if podSpec, isPodSpec := (object.Object).(*armadaevents.KubernetesObject_PodSpec); isPodSpec { - podSpecs = append(podSpecs, podSpec.PodSpec.PodSpec) - } - } - - // Test that all podspecs have the expected default tolerations. 
- for _, podSpec := range podSpecs { - ok = ok && assert.Equal(t, expectedTolerations, podSpec.Tolerations) - } - - return ok - case *armadaevents.EventSequence_Event_ReprioritiseJob: - case *armadaevents.EventSequence_Event_CancelJob: - case *armadaevents.EventSequence_Event_JobSucceeded: - case *armadaevents.EventSequence_Event_JobRunSucceeded: - case *armadaevents.EventSequence_Event_JobRunLeased: - case *armadaevents.EventSequence_Event_JobRunAssigned: - case *armadaevents.EventSequence_Event_JobRunRunning: - case *armadaevents.EventSequence_Event_JobRunErrors: - } - return true -} - -// countObjectTypes returns a map from object type (as a string) to the number of objects of that type. -func countObjectTypes(objects []*armadaevents.KubernetesObject) map[string]int { - result := make(map[string]int) - for _, object := range objects { - typeName := fmt.Sprintf("%T", object.Object) - count, _ := result[typeName] - result[typeName] = count + 1 - } - return result -} - -func receiveJobSetSequences( - ctx context.Context, - consumer pulsar.Consumer, - queue string, - jobSetName string, - maxEvents int, - timeout time.Duration, -) (sequences []*armadaevents.EventSequence, err error) { - acceptAllFilter := func(event *armadaevents.EventSequence_Event) bool { return true } - return receiveJobSetSequencesWithEventFilter(ctx, consumer, queue, jobSetName, maxEvents, timeout, acceptAllFilter) -} - -// receiveJobSetSequence receives messages from Pulsar, discarding any messages not for queue and jobSetName. -// The events contained in the remaining messages are collected in a single sequence, which is returned. -func receiveJobSetSequencesWithEventFilter( - ctx context.Context, - consumer pulsar.Consumer, - queue string, - jobSetName string, - maxEvents int, - timeout time.Duration, - eventFilterFunc func(*armadaevents.EventSequence_Event) bool, -) (sequences []*armadaevents.EventSequence, err error) { - sequences = make([]*armadaevents.EventSequence, 0) - numEvents := 0 - for numEvents < maxEvents { - ctxWithTimeout, _ := context.WithTimeout(ctx, timeout) - var msg pulsar.Message - msg, err = consumer.Receive(ctxWithTimeout) - if err == context.DeadlineExceeded { - fmt.Println("Timed out waiting for event") - err = nil // Timeout is expected; ignore. - return - } else if err != nil { - fmt.Println("Pulsar receive error", err) - continue - } - err = consumer.Ack(msg) - if err != nil { - fmt.Println("Pulsar ack error", err) - continue - } - - sequence := &armadaevents.EventSequence{} - err = proto.Unmarshal(msg.Payload(), sequence) - if err != nil { - fmt.Println("Sequence unmarshalling error", err) - continue - } - fmt.Printf("Received sequence %s\n", sequence) - - if sequence.Queue != queue || sequence.JobSetName != jobSetName { - fmt.Println("Skipping sequence") - continue - } - - filteredEvents := []*armadaevents.EventSequence_Event{} - for _, e := range sequence.Events { - if eventFilterFunc(e) { - filteredEvents = append(filteredEvents, e) - } - } - - sequence.Events = filteredEvents - numEvents += len(sequence.Events) - sequences = append(sequences, sequence) - } - return -} - -// concatenateSequences returns a new sequence containing all events in the provided slice of sequences. 
-func flattenSequences(sequences []*armadaevents.EventSequence) *armadaevents.EventSequence { - if len(sequences) == 0 { - return nil - } - result := &armadaevents.EventSequence{ - Queue: sequences[0].Queue, - JobSetName: sequences[0].JobSetName, - UserId: sequences[0].UserId, - Events: make([]*armadaevents.EventSequence_Event, 0), - } - for _, sequence := range sequences { - result.Events = append(result.Events, sequence.Events...) - } - return result -} - -// filterSequenceByJobId returns a new event sequence composed of the events for the job with the specified id. -func filterSequenceByJobId(sequence *armadaevents.EventSequence, id *armadaevents.Uuid) (*armadaevents.EventSequence, error) { - result := &armadaevents.EventSequence{ - Queue: sequence.Queue, - JobSetName: sequence.JobSetName, - UserId: sequence.UserId, - Events: make([]*armadaevents.EventSequence_Event, 0), - } - for _, e := range sequence.Events { - jobId, err := armadaevents.JobIdFromEvent(e) - if errors.Is(err, &armadaerrors.ErrInvalidArgument{}) { - continue - } else if err != nil { - return nil, err - } - if *jobId != *id { - continue - } - result.Events = append(result.Events, e) - } - return result, nil -} - -// filterOutStandaloneIngressInfo removed StandaloneIngressInfo events from the sequence. -// We may wish to remove them because these events may arrive in different order (e.g., it could arrive after JobRunSucceeded). -func filterOutStandaloneIngressInfo(sequence *armadaevents.EventSequence) (*armadaevents.EventSequence, []*armadaevents.StandaloneIngressInfo, error) { - result := &armadaevents.EventSequence{ - Queue: sequence.Queue, - JobSetName: sequence.JobSetName, - UserId: sequence.UserId, - Events: make([]*armadaevents.EventSequence_Event, 0), - } - standaloneIngressInfos := make([]*armadaevents.StandaloneIngressInfo, 0) - for _, e := range sequence.Events { - if ingressInfo, ok := e.Event.(*armadaevents.EventSequence_Event_StandaloneIngressInfo); ok { - standaloneIngressInfos = append(standaloneIngressInfos, ingressInfo.StandaloneIngressInfo) - continue - } - result.Events = append(result.Events, e) - } - return result, standaloneIngressInfos, nil -} - -// Create a job submit request for testing. -func createJobSubmitRequest(numJobs int) *api.JobSubmitRequest { - return createJobSubmitRequestWithClientId(numJobs, uuid.New().String()) -} - -// Create a job submit request for testing. -func createJobSubmitRequestWithClientId(numJobs int, clientId string) *api.JobSubmitRequest { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - items := make([]*api.JobSubmitRequestItem, numJobs, numJobs) - for i := 0; i < numJobs; i++ { - itemClientId := clientId - if itemClientId != "" { - itemClientId = fmt.Sprintf("%s-%d", itemClientId, i) - } - items[i] = &api.JobSubmitRequestItem{ - Namespace: userNamespace, - Priority: 1, - ClientId: itemClientId, - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "container1", - Image: "alpine:3.18.3", - Args: []string{"sleep", "5s"}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - }, - }, - }, - } - } - return &api.JobSubmitRequest{ - Queue: armadaQueueName, - JobSetId: util.NewULID(), - JobRequestItems: items, - } -} - -// Return a job request with a container that queries the specified address using wget. 
-func createWgetJobRequest(address string) *api.JobSubmitRequest { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - items := []*api.JobSubmitRequestItem{ - { - Namespace: userNamespace, - Priority: 1, - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "wget", - Image: "alpine:3.18.3", - Args: []string{"wget", address, "--timeout=5"}, // Queried from the k8s services API - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - }, - }, - }, - }, - } - return &api.JobSubmitRequest{ - Queue: armadaQueueName, - JobSetId: util.NewULID(), - JobRequestItems: items, - } -} - -// Create a job submit request with an ingress. -func createJobSubmitRequestWithIngress() *api.JobSubmitRequest { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - items := make([]*api.JobSubmitRequestItem, 1) - - items[0] = &api.JobSubmitRequestItem{ - Namespace: userNamespace, - Priority: 1, - ClientId: uuid.New().String(), // So we can test that we get back the right thing - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "nginx", - Image: "nginx:1.21.6", - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - // Armada silently deletes services/ingresses unless the main pod exposes those. - // Hence, we need to expose the following ports. - Ports: []v1.ContainerPort{ - { - ContainerPort: 80, - Protocol: v1.ProtocolTCP, - Name: "port", - }, - }, - }, - }, - }, - Ingress: []*api.IngressConfig{ - { - Ports: []uint32{80}, - }, - }, - } - return &api.JobSubmitRequest{ - Queue: armadaQueueName, - JobSetId: util.NewULID(), - JobRequestItems: items, - } -} - -// Create a job submit request with services. -func createJobSubmitRequestWithService() *api.JobSubmitRequest { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - items := make([]*api.JobSubmitRequestItem, 1) - - items[0] = &api.JobSubmitRequestItem{ - Namespace: userNamespace, - Priority: 1, - ClientId: uuid.New().String(), // So we can test that we get back the right thing - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "nginx", - Image: "nginx:1.21.6", - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - // Armada silently deletes services/ingresses unless the main pod exposes those. - // Hence, we need to expose the following ports. - Ports: []v1.ContainerPort{ - { - ContainerPort: 80, - Protocol: v1.ProtocolTCP, - Name: "port80", - }, - { - ContainerPort: 6000, - Protocol: v1.ProtocolTCP, - Name: "port6000", - }, - }, - }, - }, - }, - Services: []*api.ServiceConfig{ - { - Type: api.ServiceType_Headless, - Ports: []uint32{80}, - }, - { - Type: api.ServiceType_NodePort, - Ports: []uint32{80}, - }, - }, - } - - return &api.JobSubmitRequest{ - Queue: armadaQueueName, - JobSetId: util.NewULID(), - JobRequestItems: items, - } -} - -// Create a job submit request with ingresses and services for testing. 
-func createJobSubmitRequestWithEverything(numJobs int) *api.JobSubmitRequest { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - items := make([]*api.JobSubmitRequestItem, numJobs, numJobs) - for i := 0; i < numJobs; i++ { - items[i] = &api.JobSubmitRequestItem{ - Namespace: userNamespace, - Priority: 1, - ClientId: uuid.New().String(), // So we can test that we get back the right thing - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "container1", - Image: "alpine:3.18.3", - Args: []string{"sleep", "5s"}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - // Armada silently deletes services/ingresses unless the main pod exposes those. - // Hence, we need to expose the following ports. - Ports: []v1.ContainerPort{ - { - ContainerPort: 5000, - Protocol: v1.ProtocolTCP, - Name: "port5000", - }, - { - ContainerPort: 6000, - Protocol: v1.ProtocolTCP, - Name: "port6000", - }, - { - ContainerPort: 7000, - Protocol: v1.ProtocolTCP, - Name: "port7000", - }, - }, - }, - }, - }, - Ingress: []*api.IngressConfig{ - { - Type: api.IngressType_Ingress, - Ports: []uint32{5000}, - }, - }, - Services: []*api.ServiceConfig{ - { - Type: api.ServiceType_NodePort, - Ports: []uint32{6000}, - }, - { - Type: api.ServiceType_Headless, - Ports: []uint32{7000}, - }, - }, - } - } - return &api.JobSubmitRequest{ - Queue: armadaQueueName, - JobSetId: util.NewULID(), - JobRequestItems: items, - } -} - -// Create a job submit request with a job that returns an error after 1 second. -func createJobSubmitRequestWithError(numJobs int) *api.JobSubmitRequest { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - items := make([]*api.JobSubmitRequestItem, numJobs, numJobs) - for i := 0; i < numJobs; i++ { - items[i] = &api.JobSubmitRequestItem{ - Namespace: userNamespace, - Priority: 1, - ClientId: uuid.New().String(), // So we can test that we get back the right thing - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "container1", - Image: "alpine:3.18.3", - Args: []string{"sleep", "5s", "&&", "exit", "1"}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - }, - }, - }, - } - } - return &api.JobSubmitRequest{ - Queue: armadaQueueName, - JobSetId: util.NewULID(), - JobRequestItems: items, - } -} - -// Run action with an Armada submit client and a Pulsar producer and consumer. -func withSetup(action func(ctx context.Context, submitClient api.SubmitClient, producer pulsar.Producer, consumer pulsar.Consumer) error) error { - // Connection to the Armada API. To submit API requests. - conn, err := client.CreateApiConnection(&client.ApiConnectionDetails{ArmadaUrl: armadaUrl}) - if err != nil { - return errors.WithStack(err) - } - defer conn.Close() - submitClient := api.NewSubmitClient(conn) - - // Create queue needed for tests. - err = client.CreateQueue(submitClient, &api.Queue{Name: armadaQueueName, PriorityFactor: 1}) - if st, ok := status.FromError(err); ok && st.Code() == codes.AlreadyExists { - // Queue already exists; we don't need to create it. - } else if err != nil { - return errors.WithStack(err) - } - - // Redirect Pulsar logs to a file since it's very verbose. 
- _ = os.Mkdir("../../.test", os.ModePerm) - f, err := os.OpenFile("../../.test/pulsar.log", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) - if err != nil { - return errors.WithStack(err) - } - logger := logrus.StandardLogger() // .WithField("service", "Pulsar") - logger.Out = f - - // Connection to Pulsar. To check that the correct sequence of messages are produced. - pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{ - URL: pulsarUrl, - OperationTimeout: 5 * time.Second, - Logger: pulsarlog.NewLoggerWithLogrus(logger), - }) - if err != nil { - return errors.WithStack(err) - } - defer pulsarClient.Close() - - producer, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{ - Topic: pulsarTopic, - }) - if err != nil { - return errors.WithStack(err) - } - defer producer.Close() - - consumer, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{ - Topic: pulsarTopic, - SubscriptionName: pulsarSubscription, - }) - if err != nil { - return errors.WithStack(err) - } - defer consumer.Close() - - // Skip any messages already published to Pulsar. - for { - ctxWithTimeout, _ := context.WithTimeout(context.Background(), time.Second) - _, err := consumer.Receive(ctxWithTimeout) - if err == context.DeadlineExceeded { - break - } else if err != nil { - return errors.WithStack(err) - } - } - - return action(context.Background(), submitClient, producer, consumer) -} diff --git a/e2e/pulsartest_client/app_test.go b/e2e/pulsartest_client/app_test.go deleted file mode 100644 index 6ef204ada48..00000000000 --- a/e2e/pulsartest_client/app_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package pulsartest_client - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - cfg "github.com/armadaproject/armada/internal/armada/configuration" - pt "github.com/armadaproject/armada/internal/pulsartest" -) - -func TestNew(t *testing.T) { - // Basic success path - pc := cfg.PulsarConfig{ - URL: "pulsar://localhost:6650", - JobsetEventsTopic: "events", - } - app, err := pt.New(pt.Params{Pulsar: pc}, "submit") - assert.NoError(t, err) - assert.NotNil(t, app) - - // Completely empty config - pc = cfg.PulsarConfig{} - app, err = pt.New(pt.Params{Pulsar: pc}, "submit") - assert.Error(t, err) - assert.Nil(t, app) - - // Missing topic name - pc = cfg.PulsarConfig{ - URL: "pulsar://localhost:6650", - } - app, err = pt.New(pt.Params{Pulsar: pc}, "submit") - assert.Error(t, err) - assert.Nil(t, app) - - // Invalid command type - pc = cfg.PulsarConfig{ - URL: "pulsar://localhost:6650", - JobsetEventsTopic: "events", - } - app, err = pt.New(pt.Params{Pulsar: pc}, "observe") - assert.Error(t, err) - assert.Nil(t, app) - - // Nonexistent topic - pc = cfg.PulsarConfig{ - URL: "pulsar://localhost:6650", - JobsetEventsTopic: "persistent://armada/armada/nonesuch", - } - app, err = pt.New(pt.Params{Pulsar: pc}, "submit") - assert.Error(t, err) - assert.Nil(t, app) -} diff --git a/e2e/setup/ingress-nginx.yaml b/e2e/setup/ingress-nginx.yaml index db08bbf59ed..9ddc483e29f 100644 --- a/e2e/setup/ingress-nginx.yaml +++ b/e2e/setup/ingress-nginx.yaml @@ -669,4 +669,4 @@ webhooks: - UPDATE resources: - ingresses - sideEffects: None \ No newline at end of file + sideEffects: None diff --git a/e2e/setup/kind.yaml b/e2e/setup/kind.yaml index 792e2758b5c..747be28e941 100644 --- a/e2e/setup/kind.yaml +++ b/e2e/setup/kind.yaml @@ -1,11 +1,13 @@ kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 name: armada-test +featureGates: + "KubeletInUserNamespace": true nodes: - role: worker - image: kindest/node:v1.24.7 + image: kindest/node:v1.26.15 - 
role: control-plane - image: kindest/node:v1.24.7 + image: kindest/node:v1.26.15 kubeadmConfigPatches: - | kind: InitConfiguration diff --git a/go.mod b/go.mod index 9cda3d18388..c67881c9d49 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/armadaproject/armada -go 1.20 +go 1.21 + +toolchain go1.21.10 // athenz@v1.10.5 and onwards bundle encrypted signing keys with the source code. // Because corporate proxies may block go get commands that pull in encrypted data, @@ -9,7 +11,6 @@ replace github.com/AthenZ/athenz v1.10.39 => github.com/AthenZ/athenz v1.10.4 require ( github.com/apache/pulsar-client-go v0.11.0 - github.com/avast/retry-go v3.0.0+incompatible github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/go-openapi/analysis v0.22.2 @@ -18,9 +19,9 @@ require ( github.com/go-openapi/runtime v0.26.0 github.com/go-openapi/spec v0.20.14 github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.5.3 - github.com/google/go-cmp v0.5.9 - github.com/google/uuid v1.4.0 + github.com/golang/protobuf v1.5.4 + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -28,24 +29,21 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v1.0.2 github.com/instrumenta/kubeval v0.0.0-20190918223246-8d013ec9fc56 - github.com/jackc/pgtype v1.14.2 - github.com/jackc/pgx/v4 v4.17.2 // indirect github.com/jolestar/go-commons-pool v2.0.0+incompatible github.com/jstemmer/go-junit-report/v2 v2.0.0 - github.com/lib/pq v1.10.7 // indirect github.com/mattn/go-zglob v0.0.4 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/oklog/ulid v1.3.1 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.17.0 github.com/rakyll/statik v0.1.7 github.com/renstrom/shortuuid v3.0.0+incompatible github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.15.0 + github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.8.4 github.com/weaveworks/promrus v1.2.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 @@ -53,15 +51,16 @@ require ( golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 golang.org/x/tools v0.18.0 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/grpc v1.57.1 + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/grpc v1.59.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.22.4 - k8s.io/apimachinery v0.22.4 - k8s.io/client-go v0.22.4 - k8s.io/component-helpers v0.22.4 - k8s.io/kubelet v0.22.4 - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 + k8s.io/api v0.26.15 + k8s.io/apimachinery v0.26.15 + k8s.io/client-go v0.26.15 + k8s.io/component-helpers v0.26.15 + k8s.io/kubectl v0.26.15 + k8s.io/kubelet v0.26.15 + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 modernc.org/sqlite v1.26.0 sigs.k8s.io/yaml v1.4.0 ) @@ -69,6 +68,7 @@ require ( require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/benbjohnson/immutable v0.4.3 + github.com/charmbracelet/glamour v0.7.0 github.com/go-openapi/errors v0.21.0 github.com/go-openapi/strfmt v0.21.10 github.com/go-openapi/swag v0.22.6 @@ -76,21 +76,21 @@ 
require ( github.com/go-playground/validator/v10 v10.15.4 github.com/gogo/status v1.1.1 github.com/golang/mock v1.6.0 - github.com/goreleaser/goreleaser v1.15.2 + github.com/goreleaser/goreleaser v1.24.0 github.com/jackc/pgx/v5 v5.5.4 github.com/jessevdk/go-flags v1.5.0 github.com/magefile/mage v1.14.0 github.com/minio/highwayhash v1.0.2 github.com/openconfig/goyang v1.2.0 - github.com/prometheus/common v0.39.0 + github.com/prometheus/common v0.45.0 github.com/redis/go-redis/extra/redisprometheus/v9 v9.0.5 github.com/redis/go-redis/v9 v9.5.1 - github.com/sanity-io/litter v1.5.5 github.com/segmentio/fasthash v1.0.3 github.com/xitongsys/parquet-go v1.6.2 - golang.org/x/time v0.3.0 + golang.org/x/time v0.5.0 gonum.org/v1/gonum v0.14.0 - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f + gopkg.in/inf.v0 v0.9.1 ) require ( @@ -98,28 +98,36 @@ require ( github.com/99designs/keyring v1.2.1 // indirect github.com/AthenZ/athenz v1.10.39 // indirect github.com/DataDog/zstd v1.5.0 // indirect + github.com/alecthomas/chroma/v2 v2.8.0 // indirect github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516 // indirect github.com/apache/thrift v0.14.2 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/aymerick/douceur v0.2.0 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.4.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect github.com/caarlos0/log v0.4.4 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/charmbracelet/lipgloss v0.9.1 // indirect github.com/danieljoos/wincred v1.1.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dlclark/regexp2 v1.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/elliotchance/orderedmap/v2 v2.2.0 // indirect - github.com/evanphx/json-patch v4.11.0+incompatible // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fatih/color v1.14.1 // indirect github.com/fortytw2/leaktest v1.3.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/go-errors/errors v1.0.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-playground/locales v0.14.1 // indirect @@ -129,76 +137,92 @@ require ( github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang/snappy v0.0.3 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.1.0 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 
// indirect github.com/goreleaser/fileglob v1.3.0 // indirect - github.com/goreleaser/nfpm/v2 v2.29.0 // indirect + github.com/goreleaser/nfpm/v2 v2.35.3 // indirect + github.com/gorilla/css v1.0.0 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/iancoleman/orderedmap v0.2.0 // indirect - github.com/imdario/mergo v0.3.15 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/invopop/jsonschema v0.7.0 // indirect - github.com/jackc/pgio v1.0.0 // indirect + github.com/invopop/jsonschema v0.12.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.16.5 // indirect + github.com/klauspost/compress v1.17.5 // indirect github.com/leodido/go-urn v1.2.4 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/linkedin/goavro/v2 v2.9.8 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/microcosm-cc/bluemonday v1.0.25 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.15.2 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pierrec/lz4 v2.0.5+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.2 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + 
github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 // indirect + github.com/xlab/treeprint v1.1.0 // indirect + github.com/yuin/goldmark v1.5.4 // indirect + github.com/yuin/goldmark-emoji v1.0.2 // indirect go.mongodb.org/mongo-driver v1.13.1 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/mod v0.15.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/protobuf v1.31.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/square/go-jose.v2 v2.4.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/cli-runtime v0.26.15 // indirect k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect @@ -208,5 +232,8 @@ require ( modernc.org/opt v0.1.3 // indirect modernc.org/strutil v1.1.3 // indirect modernc.org/token v1.0.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/kustomize/api v0.12.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 7274740622b..150504c7265 100644 --- a/go.sum +++ b/go.sum @@ -3,42 +3,27 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod 
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod 
h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= @@ -46,31 +31,18 @@ github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo8 github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= github.com/AthenZ/athenz v1.10.4 h1:EhCptJxuPU2BNU0ZUTJRLrNwAFv06zMx0viN+PrV9YA= github.com/AthenZ/athenz v1.10.4/go.mod h1:ZKAbcckIMkqD2UKqBU2amZoynztPrgYcsmZ934LTDH4= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink= +github.com/alecthomas/assert/v2 v2.2.1/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/chroma/v2 v2.8.0 h1:w9WJUjFFmHHB2e8mRpL9jjy3alYDlU0QLDezj1xE264= +github.com/alecthomas/chroma/v2 v2.8.0/go.mod h1:yrkMI9807G1ROx13fhe1v6PN2DDeaR73L3d+1nmYQtw= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod 
h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516 h1:byKBBF2CKWBjjA4J1ZL2JXttJULvWSl50LegTyRZ728= github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= @@ -82,44 +54,40 @@ github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/ardielle/ardielle-go v1.5.2 h1:TilHTpHIQJ27R1Tl/iITBzMwiUGSlVfiVhwDNGM3Zj4= github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= -github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.30.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.4.3 h1:GYHcksoJ9K6HyAUpGxwZURrbTkXA0Dh4otXGqbhdrjA= github.com/benbjohnson/immutable v0.4.3/go.mod h1:qJIKKSmdqz1tVzNtst1DZzvaqOU1onk1rc03IeM3Owk= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.4.0 h1:+YZ8ePm+He2pU3dZlIZiOeAKfrBkXi1lSrXJ/Xzgbu8= github.com/bits-and-blooms/bitset v1.4.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver 
v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boynton/repl v0.0.0-20170116235056-348863958e3e/go.mod h1:Crc/GCZ3NXDVCio7Yr0o+SSrytpcFhLmVCIzi0s49t4= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/caarlos0/log v0.4.4 h1:LnvgBz/ofsJ00AupP/cEfksJSZglb1L69g4Obk/sdAc= github.com/caarlos0/log v0.4.4/go.mod h1:+AmCI9Liv5LKXmzFmFI1htuHdTTj/0R3KuoP9DMY7Mo= github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng= +github.com/charmbracelet/glamour v0.7.0/go.mod h1:jUMh5MeihljJPQbJ/wf4ldw2+yBP59+ctV36jASy7ps= github.com/charmbracelet/lipgloss v0.9.1 h1:PNyd3jvaJbg4jRHKWXnCj1akQm4rh8dbEzN1p/u1KWg= github.com/charmbracelet/lipgloss v0.9.1/go.mod h1:1mPmG4cxScwUQALAAnacHaigiiHB9Pmr+v1VEawJl6I= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -127,36 +95,25 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= +github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= +github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -164,46 +121,38 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk= github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 
+github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -211,10 +160,8 @@ github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= @@ -225,12 +172,12 @@ github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/ github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= github.com/go-openapi/strfmt v0.21.10 h1:JIsly3KXZB/Qf4UzvzJpg4OELH/0ASDQsyk//TTBDDk= github.com/go-openapi/strfmt v0.21.10/go.mod h1:vNDMwbilnl7xKiO/Ve/8H8Bb2JIInBnH+lqiw6QWgis= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.22.6 h1:dnqg1XfHXL9aBxSbktBqFR5CxVyVI+7fYWhAf1JOeTw= github.com/go-openapi/swag v0.22.6/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-openapi/validate v0.22.6 h1:+NhuwcEYpWdO5Nm4bmvhGLW0rt1Fcc532Mu3wpypXfo= github.com/go-openapi/validate v0.22.6/go.mod h1:eaddXSqKeTg5XpSmj1dYyFTK/95n/XHwcOY+BMxKMyM= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod 
h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= @@ -243,11 +190,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a h1:dR8+Q0uO5S2ZBcs2IH6VBKYwSxPo2vYCYq0ot0mu7xA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= @@ -255,7 +199,6 @@ github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -265,9 +208,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -275,38 +216,33 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -315,158 +251,82 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/protobuf v3.11.4+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/google-cloud-go-testing 
v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I= github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= -github.com/goreleaser/goreleaser v1.15.2 h1:VYCFKTzzZFnFfSqfjvOmT8ip7VcZv1Y9JV13hAuDDB8= -github.com/goreleaser/goreleaser v1.15.2/go.mod h1:LcZCCNpzLtCcIDWnySx2q71iWUkR9t8Ls0qrOlGdH7c= -github.com/goreleaser/nfpm/v2 v2.29.0 h1:QW7MD5Od8ePAWqvC+kGQiF8OH5JkSKV+HcblcT0NX6A= -github.com/goreleaser/nfpm/v2 v2.29.0/go.mod h1:+O8Rgz7geEXG1ym2Yl8CGPg5nP2LRuCgkBK6CQF+Q3c= +github.com/goreleaser/goreleaser v1.24.0 h1:jsoS5T2CvPKOyECPATAo8hCvUaX8ok4iAq9m5Zyl1L0= +github.com/goreleaser/goreleaser v1.24.0/go.mod h1:iEWoXoWy8y5AvqRhHPwXINHLYyyJCz5qkGzooCdRrGo= +github.com/goreleaser/nfpm/v2 v2.35.3 h1:YGEygriY8hbsNdCBUif6RLb5xPISDHc+d22rRGXV4Zk= +github.com/goreleaser/nfpm/v2 v2.35.3/go.mod h1:eyKRLSdXPCV1GgJ0tDNe4SqcZD0Fr5cezRwcuLjpxyM= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= -github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA= -github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= 
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/instrumenta/kubeval v0.0.0-20190918223246-8d013ec9fc56 h1:kKOrEaxR9KvCDdnQqjiBxbaeJg/goLvJvW0lno6aWm4= github.com/instrumenta/kubeval v0.0.0-20190918223246-8d013ec9fc56/go.mod h1:bpiMYvNpVxWjdJsS0hDRu9TrobT5GfWCZwJseGUstxE= -github.com/invopop/jsonschema v0.7.0 h1:2vgQcBz1n256N+FpX3Jq7Y17AjYt46Ig3zIWyy770So= -github.com/invopop/jsonschema v0.7.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= -github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= +github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= -github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgtype v1.14.2 h1:QBdZQTKpPdBlw2AdKwHEyqUcm/lrl2cwWAHjCMyln/o= -github.com/jackc/pgtype v1.14.2/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= -github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 
v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk= @@ -477,54 +337,37 @@ github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jolestar/go-commons-pool v2.0.0+incompatible h1:uHn5uRKsLLQSf9f1J5QPY2xREWx/YH+e4bIIXcAuAaE= github.com/jolestar/go-commons-pool v2.0.0+incompatible/go.mod h1:ChJYIbIch0DMCSU6VU0t0xhPoWDR2mMFIQek3XWU0s8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jstemmer/go-junit-report/v2 v2.0.0 h1:bMZNO9B16VFn07tKyi4YJFIbZtVmJaa5Xakv9dcwK58= github.com/jstemmer/go-junit-report/v2 v2.0.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E= +github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/linkedin/goavro/v2 v2.9.8 h1:jN50elxBsGBDGVDEKqUlDuU1cFwJ11K/yrJCBMe/7Wg= github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= @@ -532,64 +375,45 @@ github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo= github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable 
v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM= github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg= +github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= @@ -597,83 +421,57 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/openconfig/gnmi v0.0.0-20200414194230-1597cc0f2600/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= github.com/openconfig/goyang v0.0.0-20200115183954-d0a48929f0ea/go.mod h1:dhXaV0JgHJzdrHi2l+w0fZrwArtXL7jEFoiqLEdmkvU= github.com/openconfig/goyang v1.2.0 h1:mChUZvp1kCWq6Q00wVCtOToddFzEsGlMGG+V+wNXva8= github.com/openconfig/goyang v1.2.0/go.mod h1:vX61x01Q46AzbZUzG617vWqh/cB+aisc+RrNkXRd3W8= github.com/openconfig/ygot v0.6.0/go.mod h1:o30svNf7O0xK+R35tlx95odkDmZWS9JyWWQSmIhqwAs= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pborman/getopt v0.0.0-20190409184431-ee0cd42419d3/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v0.0.0-20180724185102-c2dbbc24a979/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= 
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common 
v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/redis/go-redis/extra/redisprometheus/v9 v9.0.5 h1:kvl0LOTQD23VR1R7A9vDti9msfV6mOE2+j6ngYkFsfg= @@ -689,63 +487,43 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= -github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast 
v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.0-20180820174524-ff0d02e85550/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v0.0.0-20180814060501-14d3d4c51834/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20180821114517-d929dcbb1086/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.1.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -753,27 +531,24 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= @@ -785,81 +560,50 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v0.0.0-20180816142147-da425ebb7609/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww= github.com/xitongsys/parquet-go v1.6.2 h1:MhCaXii4eqceKPu9BwrjLqyK10oX9WF+xGhwvwbw7xM= github.com/xitongsys/parquet-go v1.6.2/go.mod h1:IulAQyalCm0rPiZVNnCgm/PCL64X2tdSVGMQ/UeKqWA= github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA= github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 h1:a742S4V5A15F93smuVxA60LQWsrCnN8bKeWDBARU1/k= github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE= +github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= +github.com/yuin/goldmark v1.5.4/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark-emoji v1.0.2 h1:c/RgTShNgHTtc6xdz2KKI74jJr6rWi7FPgnP9GAsO5s= +github.com/yuin/goldmark-emoji v1.0.2/go.mod h1:RhP/RWpexdp+KHs7ghKnifRoIs/Bq4nDS7tRbCkOwKY= go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -885,9 +629,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -896,19 +637,12 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -916,35 +650,19 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= @@ -954,10 +672,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -966,27 +680,16 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180821044426-4ea2f632f6e9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -994,63 +697,33 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= @@ -1060,8 +733,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= @@ -1071,33 +742,24 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1105,7 +767,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1113,39 +774,18 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1157,24 +797,13 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1193,35 +822,16 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1231,47 +841,31 @@ google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg= -google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= @@ -1280,17 +874,13 @@ gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mN gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y= gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1301,36 +891,32 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw= -k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= -k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck= -k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= -k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg= -k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= -k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A= -k8s.io/component-helpers v0.22.4 h1:Pso4iXoY6aYLCYQlNkME2MSJvAXo/7lnJYsWHdC6tvE= -k8s.io/component-helpers v0.22.4/go.mod h1:A50qTyczDFbhZDifIfS2zFrHuPk9UNOWPpvNZ+3RSIs= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/api v0.26.15 h1:tjMERUjIwkq+2UtPZL5ZbSsLkpxUv4gXWZfV5lQl+Og= +k8s.io/api v0.26.15/go.mod h1:CtWOrFl8VLCTLolRlhbBxo4fy83tjCLEtYa5pMubIe0= +k8s.io/apimachinery v0.26.15 h1:GPxeERYBSqSZlj3xIkX4L6mBjzZ9q8JPnJ+Vj15qe+g= +k8s.io/apimachinery v0.26.15/go.mod h1:O/uIhIOWuy6ndHqQ6qbkjD7OgeMhVtlk8+Z66ZcmJQc= +k8s.io/cli-runtime v0.26.15 h1:+y3am0YLVBEfe4je5taxVUM8EKQKnUqzmXBdn3Ytxko= +k8s.io/cli-runtime v0.26.15/go.mod h1:AXABAdbXP0xeIJV4SpJ1caMR7FY8GjXTxMsJ5/1iMF0= +k8s.io/client-go v0.26.15 h1:A2Yav2v+VZQfpEsf5ESFp2Lqq5XACKBDrwkG+jEtOg0= +k8s.io/client-go v0.26.15/go.mod h1:KJs7snLEyKPlypqTQG/ngcaqE6h3/6qTvVHDViRL+iI= +k8s.io/component-helpers v0.26.15 h1:2ln2voQ6oLMUKzksr29g47iE1Y0rLdB+2KICF8F1f5Q= +k8s.io/component-helpers v0.26.15/go.mod h1:UwLS62rpGU8sIJfnBWChicMdf14y9hdu5DXicHay4Hk= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kubelet v0.22.4 h1:0eaVDObhAuDCDnQJS9xqgfAP5/IWHMt6un4L/DQs0so= -k8s.io/kubelet v0.22.4/go.mod h1:9dCtyqqDnXJYF9E2mejBmDQb+flkAGFBzGgnlW/goyo= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kubectl v0.26.15 
h1:Q118/ZVWmUYEm6Iod8MKuxQFwTBBopBogGq5tkudvhg= +k8s.io/kubectl v0.26.15/go.mod h1:JgN3H70qdFjI/93T91gVOAsSExxNmccoCQLDNX//aYw= +k8s.io/kubelet v0.26.15 h1:zf6epB3dqA5bperYLhyuFr+gQQ9qasM95cyeKAuodZc= +k8s.io/kubelet v0.26.15/go.mod h1:8g/EzBlR1ByT5jkYbH9iaCNCFKDUNhHj4cx38UrtyiY= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= @@ -1338,7 +924,9 @@ modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM= modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= @@ -1352,16 +940,22 @@ modernc.org/sqlite v1.26.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= +modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c= modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= +modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= +sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= +sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= +sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml 
v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/internal/armada/configuration/constants.go b/internal/armada/configuration/constants.go
index b5bb9be9b88..86bdc38f4a8 100644
--- a/internal/armada/configuration/constants.go
+++ b/internal/armada/configuration/constants.go
@@ -7,16 +7,11 @@ const (
 	// GangCardinalityAnnotation All jobs in a gang must specify the total number of jobs in the gang via this annotation.
 	// The cardinality should be expressed as a positive integer, e.g., "3".
 	GangCardinalityAnnotation = "armadaproject.io/gangCardinality"
-	// GangMinimumCardinalityAnnotation All jobs in a gang must specify the minimum size for the gang to be schedulable via this annotation.
-	// The cardinality should be expressed as a positive integer, e.g., "3".
-	GangMinimumCardinalityAnnotation = "armadaproject.io/gangMinimumCardinality"
 	// The jobs that make up a gang may be constrained to be scheduled across a set of uniform nodes.
 	// Specifically, if provided, all gang jobs are scheduled onto nodes for which the value of the provided label is equal.
 	// Used to ensure, e.g., that all gang jobs are scheduled onto the same cluster or rack.
 	GangNodeUniformityLabelAnnotation = "armadaproject.io/gangNodeUniformityLabel"
 	// GangNumJobsScheduledAnnotation is set by the scheduler and indicates how many gang jobs were scheduled.
-	// For example, a gang composed of 4 jobs may only have a subset be scheduled if GangMinimumCardinalityAnnotation < 4.
-	GangNumJobsScheduledAnnotation = "armadaproject.io/numGangJobsScheduled"
 	// FailFastAnnotation, if set to true, ensures Armada does not re-schedule jobs that fail to start.
 	// Instead, the job the pod is part of fails immediately.
 	FailFastAnnotation = "armadaproject.io/failFast"
@@ -25,9 +20,7 @@ var schedulingAnnotations = map[string]bool{
 	GangIdAnnotation: true,
 	GangCardinalityAnnotation: true,
-	GangMinimumCardinalityAnnotation: true,
 	GangNodeUniformityLabelAnnotation: true,
-	GangNumJobsScheduledAnnotation: true,
 	FailFastAnnotation: true,
 }
diff --git a/internal/armada/configuration/types.go b/internal/armada/configuration/types.go
index 55e7c6ba8bb..575ea21f77a 100644
--- a/internal/armada/configuration/types.go
+++ b/internal/armada/configuration/types.go
@@ -1,19 +1,15 @@ package configuration
 import (
-	"fmt"
 	"time"
 	"github.com/apache/pulsar-client-go/pulsar"
-	"github.com/go-playground/validator/v10"
 	"github.com/redis/go-redis/v9"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	authconfig "github.com/armadaproject/armada/internal/common/auth/configuration"
 	grpcconfig "github.com/armadaproject/armada/internal/common/grpc/configuration"
 	armadaresource "github.com/armadaproject/armada/internal/common/resource"
-	"github.com/armadaproject/armada/internal/common/types"
 	"github.com/armadaproject/armada/pkg/client"
 )
@@ -33,18 +29,16 @@ type ArmadaConfig struct {
 	SchedulerApiConnection client.ApiConnectionDetails
-	CancelJobsBatchSize int
-
-	Redis redis.UniversalOptions
 	EventsApiRedis redis.UniversalOptions
 	Pulsar PulsarConfig
-	Postgres PostgresConfig // Used for Pulsar submit API deduplication
+	Postgres PostgresConfig // Needs to point to the lookout db
 	QueryApi QueryApiConfig
+	// Period At which the Queue cache will be refreshed
+	QueueCacheRefreshPeriod time.Duration
+
 	// Config relating to job submission.
 	Submission SubmissionConfig
-	// Scheduling config used by the submitChecker.
-	Scheduling SchedulingConfig
 }
 type PulsarConfig struct {
@@ -131,217 +125,11 @@ type SubmissionConfig struct {
 	DefaultActiveDeadlineByResourceRequest map[string]time.Duration
 }
-// SchedulingConfig contains config controlling the Armada scheduler.
-//
-// The Armada scheduler is in charge of assigning pods to cluster and nodes.
-// The Armada scheduler is part of the Armada control plane.
-//
-// Features:
-// 1. Queuing and fairly dividing resources between users.
-// 2. Fair preemption, including between jobs of equal priority to balance resource allocation.
-// 3. Gang scheduling, optional across clusters, and with lower and upper bounds on the number of jobs scheduled.
-//
-// Note that Armada still relies on kube-scheduler for binding of pods to nodes.
-// This is achieved by adding to each pod created by Armada a node selector that matches only the intended node.
-type SchedulingConfig struct {
-	// Set to true to disable scheduling
-	DisableScheduling bool
-	// Set to true to enable scheduler assertions. This results in some performance loss.
-	EnableAssertions bool
-	// If using PreemptToFairShare,
-	// the probability of evicting jobs on a node to balance resource usage.
-	// TODO(albin): Remove.
-	NodeEvictionProbability float64
-	// If using PreemptToFairShare,
-	// the probability of evicting jobs on oversubscribed nodes, i.e.,
-	// nodes on which the total resource requests are greater than the available resources.
-	// TODO(albin): Remove.
-	NodeOversubscriptionEvictionProbability float64
-	// Only queues allocated more than this fraction of their fair share are considered for preemption.
-	ProtectedFractionOfFairShare float64 `validate:"gte=0"`
-	// Armada adds a node selector term to every scheduled pod using this label with the node name as value.
-	// This to force kube-scheduler to schedule pods on the node chosen by Armada.
-	// For example, if NodeIdLabel is "kubernetes.io/hostname" and armada schedules a pod on node "myNode",
-	// then Armada adds "kubernetes.io/hostname": "myNode" to the pod node selector before sending it to the executor.
-	NodeIdLabel string `validate:"required"`
-	// Map from priority class names to priority classes.
-	// Must be consistent with Kubernetes priority classes.
-	// I.e., priority classes defined here must be defined in all executor clusters and should map to the same priority.
-	PriorityClasses map[string]types.PriorityClass `validate:"dive"`
-	// Jobs with no priority class are assigned this priority class when ingested by the scheduler.
-	// Must be a key in the PriorityClasses map above.
-	DefaultPriorityClassName string
-	// If set, override the priority class name of pods with this value when sending to an executor.
-	PriorityClassNameOverride *string
-	// Number of jobs to load from the database at a time.
-	MaxQueueLookback uint
-	// In each invocation of the scheduler, no more jobs are scheduled once this limit has been exceeded.
-	// Note that the total scheduled resources may be greater than this limit.
-	MaximumResourceFractionToSchedule map[string]float64
-	// Overrides MaximalClusterFractionToSchedule if set for the current pool.
-	MaximumResourceFractionToScheduleByPool map[string]map[string]float64
-	// The rate at which Armada schedules jobs is rate-limited using a token bucket approach.
-	// Specifically, there is a token bucket that persists between scheduling rounds.
-	// The bucket fills up at a rate of MaximumSchedulingRate tokens per second and has capacity MaximumSchedulingBurst.
-	// A token is removed from the bucket when a scheduling a job and scheduling stops while the bucket is empty.
-	//
-	// Hence, MaximumSchedulingRate controls the maximum number of jobs scheduled per second in steady-state,
-	// i.e., once the burst capacity has been exhausted.
-	//
-	// Rate-limiting is based on the number of tokens available at the start of each scheduling round,
-	// i.e., tokens accumulated while scheduling become available at the start of the next scheduling round.
-	//
-	// For more information about the rate-limiter, see:
-	// https://pkg.go.dev/golang.org/x/time/rate#Limiter
-	MaximumSchedulingRate float64 `validate:"gt=0"`
-	// MaximumSchedulingBurst controls the burst capacity of the rate-limiter.
-	//
-	// There are two important implications:
-	// - Armada will never schedule more than MaximumSchedulingBurst jobs per scheduling round.
-	// - Gang jobs with cardinality greater than MaximumSchedulingBurst can never be scheduled.
-	MaximumSchedulingBurst int `validate:"gt=0"`
-	// In addition to the global rate-limiter, there is a separate rate-limiter for each queue.
-	// These work the same as the global rate-limiter, except they apply only to jobs scheduled from a specific queue.
-	//
-	// Per-queue version of MaximumSchedulingRate.
-	MaximumPerQueueSchedulingRate float64 `validate:"gt=0"`
-	// Per-queue version of MaximumSchedulingBurst.
-	MaximumPerQueueSchedulingBurst int `validate:"gt=0"`
-	// Maximum number of times a job is retried before considered failed.
-	MaxRetries uint
-	// List of resource names, e.g., []string{"cpu", "memory"}, to consider when computing DominantResourceFairness.
- DominantResourceFairnessResourcesToConsider []string - // Once a node has been found on which a pod can be scheduled, - // the scheduler will consider up to the next maxExtraNodesToConsider nodes. - // The scheduler selects the node with the best score out of the considered nodes. - // In particular, the score expresses whether preemption is necessary to schedule a pod. - // Hence, a larger MaxExtraNodesToConsider would reduce the expected number of preemptions. - // TODO(albin): Remove. It's unused. - MaxExtraNodesToConsider uint - // Resources, e.g., "cpu", "memory", and "nvidia.com/gpu", for which the scheduler creates indexes for efficient lookup. - // This list must contain at least one resource. Adding more than one resource is not required, but may speed up scheduling. - // Ideally, this list contains all resources that frequently constrain which nodes a job can be scheduled onto. - IndexedResources []IndexedResource - // Node labels that the scheduler creates indexes for efficient lookup of. - // Should include node labels frequently used by node selectors on submitted jobs. - // - // If not set, no labels are indexed. - IndexedNodeLabels []string - // Taint keys that the scheduler creates indexes for efficient lookup of. - // Should include keys of taints frequently used in tolerations on submitted jobs. - // - // If not set, all taints are indexed. - IndexedTaints []string - // WellKnownNodeTypes defines a set of well-known node types used to define "home" and "away" nodes for a given priority class. - WellKnownNodeTypes []WellKnownNodeType `validate:"dive"` - // Executor that haven't heartbeated in this time period are considered stale. - // No new jobs are scheduled onto stale executors. - ExecutorTimeout time.Duration - // Maximum number of jobs that can be assigned to a executor but not yet acknowledged, before - // the scheduler is excluded from consideration by the scheduler. - MaxUnacknowledgedJobsPerExecutor uint - // If true, do not during scheduling skip jobs with requirements known to be impossible to meet. - AlwaysAttemptScheduling bool - // The frequency at which the scheduler updates the cluster state. - ExecutorUpdateFrequency time.Duration - // Controls node and queue success probability estimation. - FailureProbabilityEstimation FailureEstimatorConfig - // Controls node quarantining, i.e., removing from consideration for scheduling misbehaving nodes. - NodeQuarantining NodeQuarantinerConfig - // Controls queue quarantining, i.e., rate-limiting scheduling from misbehaving queues. - QueueQuarantining QueueQuarantinerConfig - // Defines the order in which pools will be scheduled. 
Higher priority pools will be scheduled first - PoolSchedulePriority map[string]int - // Default priority for pools that are not in the above list - DefaultPoolSchedulePriority int -} - -const ( - DuplicateWellKnownNodeTypeErrorMessage = "duplicate well-known node type name" - AwayNodeTypesWithoutPreemptionErrorMessage = "priority class has away node types but is not preemptible" - UnknownWellKnownNodeTypeErrorMessage = "priority class refers to unknown well-known node type" -) - -func SchedulingConfigValidation(sl validator.StructLevel) { - c := sl.Current().Interface().(SchedulingConfig) - - wellKnownNodeTypes := make(map[string]bool) - for i, wellKnownNodeType := range c.WellKnownNodeTypes { - if wellKnownNodeTypes[wellKnownNodeType.Name] { - fieldName := fmt.Sprintf("WellKnownNodeTypes[%d].Name", i) - sl.ReportError(wellKnownNodeType.Name, fieldName, "", DuplicateWellKnownNodeTypeErrorMessage, "") - } - wellKnownNodeTypes[wellKnownNodeType.Name] = true - } - - for priorityClassName, priorityClass := range c.PriorityClasses { - if len(priorityClass.AwayNodeTypes) > 0 && !priorityClass.Preemptible { - fieldName := fmt.Sprintf("Preemption.PriorityClasses[%s].Preemptible", priorityClassName) - sl.ReportError(priorityClass.Preemptible, fieldName, "", AwayNodeTypesWithoutPreemptionErrorMessage, "") - } - - for i, awayNodeType := range priorityClass.AwayNodeTypes { - if !wellKnownNodeTypes[awayNodeType.WellKnownNodeTypeName] { - fieldName := fmt.Sprintf("Preemption.PriorityClasses[%s].AwayNodeTypes[%d].WellKnownNodeTypeName", priorityClassName, i) - sl.ReportError(awayNodeType.WellKnownNodeTypeName, fieldName, "", UnknownWellKnownNodeTypeErrorMessage, "") - } - } - } -} - -// IndexedResource represents a resource the scheduler indexes for efficient lookup. -type IndexedResource struct { - // Resource name, e.g., "cpu", "memory", or "nvidia.com/gpu". - Name string - // Resolution with which Armada tracks this resource; larger values indicate lower resolution. - // In particular, the allocatable resources on each node are rounded to a multiple of the resolution. - // Lower resolution speeds up scheduling by improving node lookup speed but may prevent scheduling jobs, - // since the allocatable resources may be rounded down to be a multiple of the resolution. - // - // See NodeDb docs for more details. - Resolution resource.Quantity -} - -// A WellKnownNodeType defines a set of nodes; see AwayNodeType. -type WellKnownNodeType struct { - // Name is the unique identifier for this node type. - Name string `validate:"required"` - // Taints is the set of taints that characterizes this node type; a node is - // part of this node type if and only if it has all of these taints. - Taints []v1.Taint -} - -// FailureEstimatorConfig controls node and queue success probability estimation. -// See internal/scheduler/failureestimator.go for details. -type FailureEstimatorConfig struct { - Disabled bool - NumInnerIterations int `validate:"gt=0"` - InnerOptimiserStepSize float64 `validate:"gt=0"` - OuterOptimiserStepSize float64 `validate:"gt=0"` - OuterOptimiserNesterovAcceleration float64 `validate:"gte=0"` -} - -// NodeQuarantinerConfig controls how nodes are quarantined, i.e., removed from consideration when scheduling new jobs. -// See internal/scheduler/quarantine/node_quarantiner.go for details. 
-type NodeQuarantinerConfig struct { - FailureProbabilityQuarantineThreshold float64 `validate:"gte=0,lte=1"` - FailureProbabilityEstimateTimeout time.Duration `validate:"gte=0"` -} - -// QueueQuarantinerConfig controls how scheduling from misbehaving queues is rate-limited. -// See internal/scheduler/quarantine/queue_quarantiner.go for details. -type QueueQuarantinerConfig struct { - QuarantineFactorMultiplier float64 `validate:"gte=0,lte=1"` - FailureProbabilityEstimateTimeout time.Duration `validate:"gte=0"` -} - // TODO: we can probably just typedef this to map[string]string type PostgresConfig struct { Connection map[string]string } type QueryApiConfig struct { - Enabled bool - Postgres PostgresConfig MaxQueryItems int } diff --git a/internal/armada/repository/apimessages/conversions.go b/internal/armada/event/conversion/conversions.go similarity index 95% rename from internal/armada/repository/apimessages/conversions.go rename to internal/armada/event/conversion/conversions.go index ddcd00c7aa0..0e16860b9bb 100644 --- a/internal/armada/repository/apimessages/conversions.go +++ b/internal/armada/event/conversion/conversions.go @@ -1,4 +1,4 @@ -package apimessages +package conversion import ( "time" @@ -33,8 +33,6 @@ func FromEventSequence(es *armadaevents.EventSequence) ([]*api.EventMessage, err convertedEvents, err = FromInternalReprioritiseJob(es.UserId, es.Queue, es.JobSetName, *event.Created, esEvent.ReprioritiseJob) case *armadaevents.EventSequence_Event_ReprioritisedJob: convertedEvents, err = FromInternalReprioritisedJob(es.UserId, es.Queue, es.JobSetName, *event.Created, esEvent.ReprioritisedJob) - case *armadaevents.EventSequence_Event_JobDuplicateDetected: - convertedEvents, err = FromInternalLogDuplicateDetected(es.Queue, es.JobSetName, *event.Created, esEvent.JobDuplicateDetected) case *armadaevents.EventSequence_Event_JobRunLeased: convertedEvents, err = FromInternalLogJobRunLeased(es.Queue, es.JobSetName, *event.Created, esEvent.JobRunLeased) case *armadaevents.EventSequence_Event_JobRunErrors: @@ -57,6 +55,7 @@ func FromEventSequence(es *armadaevents.EventSequence) ([]*api.EventMessage, err *armadaevents.EventSequence_Event_CancelJobSet, *armadaevents.EventSequence_Event_JobRunSucceeded, *armadaevents.EventSequence_Event_JobRequeued, + *armadaevents.EventSequence_Event_JobValidated, *armadaevents.EventSequence_Event_PartitionMarker: // These events have no api analog right now, so we ignore log.Debugf("ignoring event type %T", esEvent) @@ -218,30 +217,6 @@ func FromInternalReprioritisedJob(userId string, queueName string, jobSetName st }, nil } -func FromInternalLogDuplicateDetected(queueName string, jobSetName string, time time.Time, e *armadaevents.JobDuplicateDetected) ([]*api.EventMessage, error) { - jobId, err := armadaevents.UlidStringFromProtoUuid(e.NewJobId) - if err != nil { - return nil, err - } - originalJobId, err := armadaevents.UlidStringFromProtoUuid(e.OldJobId) - if err != nil { - return nil, err - } - return []*api.EventMessage{ - { - Events: &api.EventMessage_DuplicateFound{ - DuplicateFound: &api.JobDuplicateFoundEvent{ - JobId: jobId, - JobSetId: jobSetName, - Queue: queueName, - Created: time, - OriginalJobId: originalJobId, - }, - }, - }, - }, nil -} - func FromInternalLogJobRunLeased(queueName string, jobSetName string, time time.Time, e *armadaevents.JobRunLeased) ([]*api.EventMessage, error) { jobId, err := armadaevents.UlidStringFromProtoUuid(e.JobId) if err != nil { @@ -436,6 +411,20 @@ func FromInternalJobErrors(queueName string, jobSetName 
string, time time.Time, }, } events = append(events, event) + case *armadaevents.Error_JobRejected: + event := &api.EventMessage{ + Events: &api.EventMessage_Failed{ + Failed: &api.JobFailedEvent{ + JobId: jobId, + JobSetId: jobSetName, + Queue: queueName, + Created: time, + Reason: reason.JobRejected.Message, + Cause: api.Cause_Rejected, + }, + }, + } + events = append(events, event) default: log.Warnf("unknown error %T for job %s", reason, jobId) event := &api.EventMessage{ diff --git a/internal/armada/repository/apimessages/conversions_test.go b/internal/armada/event/conversion/conversions_test.go similarity index 96% rename from internal/armada/repository/apimessages/conversions_test.go rename to internal/armada/event/conversion/conversions_test.go index fb7bf125930..379d848fa8e 100644 --- a/internal/armada/repository/apimessages/conversions_test.go +++ b/internal/armada/event/conversion/conversions_test.go @@ -1,4 +1,4 @@ -package apimessages +package conversion import ( "testing" @@ -239,39 +239,6 @@ func TestConvertReprioritised(t *testing.T) { assert.Equal(t, expected, apiEvents) } -func TestDuplicateJob(t *testing.T) { - oldJobString := "02f3j0g1md4qx7z5qb148qnh4r" - oldJobProto, _ := armadaevents.ProtoUuidFromUlidString(oldJobString) - - duplicate := &armadaevents.EventSequence_Event{ - Created: &baseTime, - Event: &armadaevents.EventSequence_Event_JobDuplicateDetected{ - JobDuplicateDetected: &armadaevents.JobDuplicateDetected{ - NewJobId: jobIdProto, - OldJobId: oldJobProto, - }, - }, - } - - expected := []*api.EventMessage{ - { - Events: &api.EventMessage_DuplicateFound{ - DuplicateFound: &api.JobDuplicateFoundEvent{ - JobId: jobIdString, - JobSetId: jobSetName, - Queue: queue, - Created: baseTime, - OriginalJobId: oldJobString, - }, - }, - }, - } - - apiEvents, err := FromEventSequence(toEventSeq(duplicate)) - assert.NoError(t, err) - assert.Equal(t, expected, apiEvents) -} - func TestConvertLeased(t *testing.T) { leased := &armadaevents.EventSequence_Event{ Created: &baseTime, diff --git a/internal/armada/server/event.go b/internal/armada/event/event.go similarity index 86% rename from internal/armada/server/event.go rename to internal/armada/event/event.go index f885936d4bd..aeb0785855b 100644 --- a/internal/armada/server/event.go +++ b/internal/armada/event/event.go @@ -1,4 +1,4 @@ -package server +package event import ( "context" @@ -10,33 +10,31 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/armadaproject/armada/internal/armada/event/sequence" "github.com/armadaproject/armada/internal/armada/permissions" - "github.com/armadaproject/armada/internal/armada/repository" - "github.com/armadaproject/armada/internal/armada/repository/sequence" + armadaqueue "github.com/armadaproject/armada/internal/armada/queue" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client/queue" ) type EventServer struct { - authorizer ActionAuthorizer - eventRepository repository.EventRepository - queueRepository repository.QueueRepository - jobRepository repository.JobRepository + authorizer auth.ActionAuthorizer + eventRepository EventRepository + queueRepository armadaqueue.ReadOnlyQueueRepository } func NewEventServer( - authorizer ActionAuthorizer, - eventRepository repository.EventRepository, - queueRepository repository.QueueRepository, - 
jobRepository repository.JobRepository, + authorizer auth.ActionAuthorizer, + eventRepository EventRepository, + queueRepository armadaqueue.ReadOnlyQueueRepository, ) *EventServer { return &EventServer{ authorizer: authorizer, eventRepository: eventRepository, queueRepository: queueRepository, - jobRepository: jobRepository, } } @@ -44,7 +42,7 @@ func NewEventServer( func (s *EventServer) GetJobSetEvents(request *api.JobSetRequest, stream api.Event_GetJobSetEventsServer) error { ctx := armadacontext.FromGrpcCtx(stream.Context()) q, err := s.queueRepository.GetQueue(ctx, request.Queue) - var expected *repository.ErrQueueNotFound + var expected *armadaqueue.ErrQueueNotFound if errors.As(err, &expected) { return status.Errorf(codes.NotFound, "[GetJobSetEvents] Queue %s does not exist", request.Queue) } else if err != nil { @@ -80,13 +78,11 @@ func (s *EventServer) Watch(req *api.WatchRequest, stream api.Event_WatchServer) FromMessageId: req.FromId, Queue: req.Queue, ErrorIfMissing: true, - ForceLegacy: req.ForceLegacy, - ForceNew: req.ForceNew, } return s.GetJobSetEvents(request, stream) } -func (s *EventServer) serveEventsFromRepository(request *api.JobSetRequest, eventRepository repository.EventRepository, +func (s *EventServer) serveEventsFromRepository(request *api.JobSetRequest, eventRepository EventRepository, stream api.Event_GetJobSetEventsServer, ) error { ctx := armadacontext.FromGrpcCtx(stream.Context()) @@ -150,7 +146,7 @@ func (s *EventServer) serveEventsFromRepository(request *api.JobSetRequest, even } } -func validateUserHasWatchPermissions(ctx *armadacontext.Context, authorizer ActionAuthorizer, q queue.Queue, jobSetId string) error { +func validateUserHasWatchPermissions(ctx *armadacontext.Context, authorizer auth.ActionAuthorizer, q queue.Queue, jobSetId string) error { err := authorizer.AuthorizeQueueAction(ctx, q, permissions.WatchAllEvents, queue.PermissionVerbWatch) var permErr *armadaerrors.ErrUnauthorized if errors.As(err, &permErr) { diff --git a/internal/armada/repository/event.go b/internal/armada/event/event_repository.go similarity index 96% rename from internal/armada/repository/event.go rename to internal/armada/event/event_repository.go index 3e112b74799..e214eda225f 100644 --- a/internal/armada/repository/event.go +++ b/internal/armada/event/event_repository.go @@ -1,4 +1,4 @@ -package repository +package event import ( "context" @@ -12,8 +12,8 @@ import ( "github.com/redis/go-redis/v9" log "github.com/sirupsen/logrus" - "github.com/armadaproject/armada/internal/armada/repository/apimessages" - "github.com/armadaproject/armada/internal/armada/repository/sequence" + "github.com/armadaproject/armada/internal/armada/event/conversion" + "github.com/armadaproject/armada/internal/armada/event/sequence" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/pkg/api" @@ -157,7 +157,7 @@ func (repo *RedisEventRepository) extractEvents(ctx *armadacontext.Context, msg // These fields are not present in the db messages, so we add them back here es.Queue = queue es.JobSetName = jobSetId - return apimessages.FromEventSequence(es) + return conversion.FromEventSequence(es) } func getJobSetEventsKey(queue, jobSetId string) string { diff --git a/internal/armada/repository/event_test.go b/internal/armada/event/event_repository_test.go similarity index 98% rename from internal/armada/repository/event_test.go rename to internal/armada/event/event_repository_test.go index 
7c30e433f29..fec8063d896 100644 --- a/internal/armada/repository/event_test.go +++ b/internal/armada/event/event_repository_test.go @@ -1,4 +1,4 @@ -package repository +package event import ( "testing" @@ -9,7 +9,7 @@ import ( "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" - "github.com/armadaproject/armada/internal/armada/repository/sequence" + "github.com/armadaproject/armada/internal/armada/event/sequence" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/pkg/api" diff --git a/internal/armada/server/event_test.go b/internal/armada/event/event_test.go similarity index 77% rename from internal/armada/server/event_test.go rename to internal/armada/event/event_test.go index 457e708b0d7..28a37bfc459 100644 --- a/internal/armada/server/event_test.go +++ b/internal/armada/event/event_test.go @@ -1,4 +1,4 @@ -package server +package event import ( "context" @@ -8,25 +8,41 @@ import ( "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgxpool" "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/armadaproject/armada/internal/armada/permissions" - "github.com/armadaproject/armada/internal/armada/repository" + armadaqueue "github.com/armadaproject/armada/internal/armada/queue" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/internal/common/compress" + "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" "github.com/armadaproject/armada/pkg/client/queue" ) +type FakeActionAuthorizer struct{} + +func (c *FakeActionAuthorizer) AuthorizeAction(_ *armadacontext.Context, _ permission.Permission) error { + return nil +} + +func (c *FakeActionAuthorizer) AuthorizeQueueAction( + _ *armadacontext.Context, + _ queue.Queue, + _ permission.Permission, + _ queue.PermissionVerb, +) error { + return nil +} + func TestEventServer_Health(t *testing.T) { ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) defer cancel() @@ -49,13 +65,19 @@ func TestEventServer_ForceNew(t *testing.T) { t, func(s *EventServer) { jobSetId := "set1" - queue := "" + q := queue.Queue{ + Name: "test-queue", + PriorityFactor: 1, + } jobIdString := "01f3j0g1md4qx7z5qb148qnh4r" runIdString := "123e4567-e89b-12d3-a456-426614174000" baseTime, _ := time.Parse("2006-01-02T15:04:05.000Z", "2022-03-01T15:04:05.000Z") jobIdProto, _ := armadaevents.ProtoUuidFromUlidString(jobIdString) runIdProto := armadaevents.ProtoUuidFromUuid(uuid.MustParse(runIdString)) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) + require.NoError(t, err) + stream := &eventStreamMock{} assigned := &armadaevents.EventSequence_Event{ @@ -68,20 +90,20 @@ func TestEventServer_ForceNew(t *testing.T) { }, } - err := reportPulsarEvent(ctx, &armadaevents.EventSequence{ - Queue: queue, + err = reportPulsarEvent(ctx, &armadaevents.EventSequence{ + Queue: q.Name, JobSetName: jobSetId, Events: []*armadaevents.EventSequence_Event{assigned}, }) 
require.NoError(t, err) - e := s.GetJobSetEvents(&api.JobSetRequest{Queue: queue, Id: jobSetId, Watch: false, ForceNew: true}, stream) + e := s.GetJobSetEvents(&api.JobSetRequest{Queue: q.Name, Id: jobSetId, Watch: false}, stream) assert.NoError(t, e) assert.Equal(t, 1, len(stream.sendMessages)) expected := &api.EventMessage_Pending{Pending: &api.JobPendingEvent{ JobId: jobIdString, JobSetId: jobSetId, - Queue: queue, + Queue: q.Name, Created: baseTime, }} assert.Equal(t, expected, stream.sendMessages[len(stream.sendMessages)-1].Message.Events) @@ -96,8 +118,14 @@ func TestEventServer_GetJobSetEvents_EmptyStreamShouldNotFail(t *testing.T) { ctx, t, func(s *EventServer) { + q := queue.Queue{ + Name: "test-queue", + PriorityFactor: 1, + } + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) + require.NoError(t, err) stream := &eventStreamMock{} - e := s.GetJobSetEvents(&api.JobSetRequest{Id: "test", Watch: false}, stream) + e := s.GetJobSetEvents(&api.JobSetRequest{Id: "test", Queue: q.Name, Watch: false}, stream) require.NoError(t, e) assert.Equal(t, 0, len(stream.sendMessages)) }, @@ -139,7 +167,7 @@ func TestEventServer_GetJobSetEvents_ErrorIfMissing(t *testing.T) { ctx, t, func(s *EventServer) { - err := s.queueRepository.CreateQueue(ctx, q) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) assert.NoError(t, err) stream := &eventStreamMock{} @@ -161,7 +189,7 @@ func TestEventServer_GetJobSetEvents_ErrorIfMissing(t *testing.T) { ctx, t, func(s *EventServer) { - err := s.queueRepository.CreateQueue(ctx, q) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) assert.NoError(t, err) stream := &eventStreamMock{} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -180,7 +208,7 @@ func TestEventServer_GetJobSetEvents_ErrorIfMissing(t *testing.T) { ctx, t, func(s *EventServer) { - err := s.queueRepository.CreateQueue(ctx, q) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) assert.NoError(t, err) stream := &eventStreamMock{} @@ -224,7 +252,7 @@ func TestEventServer_GetJobSetEvents_ErrorIfMissing(t *testing.T) { ctx, t, func(s *EventServer) { - err := s.queueRepository.CreateQueue(ctx, q) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) require.NoError(t, err) stream := &eventStreamMock{} @@ -290,12 +318,12 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { ctx, t, func(s *EventServer) { - s.authorizer = NewAuthorizer(authorization.NewPrincipalPermissionChecker(perms, emptyPerms, emptyPerms)) - err := s.queueRepository.CreateQueue(ctx, q) + s.authorizer = auth.NewAuthorizer(auth.NewPrincipalPermissionChecker(perms, emptyPerms, emptyPerms)) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) assert.NoError(t, err) - principal := authorization.NewStaticPrincipal("alice", []string{}) - ctx := authorization.WithPrincipal(armadacontext.Background(), principal) + principal := auth.NewStaticPrincipal("alice", []string{}) + ctx := auth.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -315,12 +343,12 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { ctx, t, func(s *EventServer) { - s.authorizer = NewAuthorizer(authorization.NewPrincipalPermissionChecker(perms, emptyPerms, emptyPerms)) - err := s.queueRepository.CreateQueue(ctx, q) + s.authorizer = auth.NewAuthorizer(auth.NewPrincipalPermissionChecker(perms, emptyPerms, emptyPerms)) + 
err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) assert.NoError(t, err) - principal := authorization.NewStaticPrincipal("alice", []string{"watch-all-events-group"}) - ctx := authorization.WithPrincipal(armadacontext.Background(), principal) + principal := auth.NewStaticPrincipal("alice", []string{"watch-all-events-group"}) + ctx := auth.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -337,12 +365,12 @@ func TestEventServer_GetJobSetEvents_Permissions(t *testing.T) { t.Run("queue permission", func(t *testing.T) { withEventServer(ctx, t, func(s *EventServer) { - s.authorizer = NewAuthorizer(authorization.NewPrincipalPermissionChecker(perms, emptyPerms, emptyPerms)) - err := s.queueRepository.CreateQueue(ctx, q) + s.authorizer = auth.NewAuthorizer(auth.NewPrincipalPermissionChecker(perms, emptyPerms, emptyPerms)) + err := s.queueRepository.(armadaqueue.QueueRepository).CreateQueue(ctx, q) assert.NoError(t, err) - principal := authorization.NewStaticPrincipal("alice", []string{"watch-events-group", "watch-queue-group"}) - ctx := authorization.WithPrincipal(armadacontext.Background(), principal) + principal := auth.NewStaticPrincipal("alice", []string{"watch-events-group", "watch-queue-group"}) + ctx := auth.WithPrincipal(armadacontext.Background(), principal) stream := &eventStreamMock{ctx: ctx} err = s.GetJobSetEvents(&api.JobSetRequest{ @@ -384,30 +412,19 @@ func reportPulsarEvent(ctx *armadacontext.Context, es *armadaevents.EventSequenc func withEventServer(ctx *armadacontext.Context, t *testing.T, action func(s *EventServer)) { t.Helper() + _ = lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + client := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 11}) - // using real redis instance as miniredis does not support streams - legacyClient := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 10}) - client := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 11}) - - eventRepo := repository.NewEventRepository(client) - queueRepo := repository.NewRedisQueueRepository(client) - jobRepo := repository.NewRedisJobRepository(client) - server := NewEventServer(&FakeActionAuthorizer{}, eventRepo, queueRepo, jobRepo) + eventRepo := NewEventRepository(client) + queueRepo := armadaqueue.NewPostgresQueueRepository(db) + server := NewEventServer(&FakeActionAuthorizer{}, eventRepo, queueRepo) + client.FlushDB(ctx) - client.FlushDB(ctx) - legacyClient.FlushDB(ctx) + action(server) - // Create test queue - err := queueRepo.CreateQueue(ctx, queue.Queue{ - Name: "", - Permissions: nil, - PriorityFactor: 1, + client.FlushDB(ctx) + return nil }) - require.NoError(t, err) - action(server) - - client.FlushDB(ctx) - legacyClient.FlushDB(ctx) } type eventStreamMock struct { diff --git a/internal/armada/repository/sequence/sequence.go b/internal/armada/event/sequence/sequence.go similarity index 100% rename from internal/armada/repository/sequence/sequence.go rename to internal/armada/event/sequence/sequence.go diff --git a/internal/armada/repository/sequence/sequence_test.go b/internal/armada/event/sequence/sequence_test.go similarity index 100% rename from internal/armada/repository/sequence/sequence_test.go rename to internal/armada/event/sequence/sequence_test.go diff --git a/internal/armada/job_expiration.go b/internal/armada/job_expiration.go deleted file mode 100644 index 69204045d73..00000000000 --- a/internal/armada/job_expiration.go +++ /dev/null @@ -1,88 +0,0 
@@ -package armada - -import ( - "time" - - "github.com/apache/pulsar-client-go/pulsar" - log "github.com/sirupsen/logrus" - - "github.com/armadaproject/armada/internal/armada/repository" - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/eventutil" - "github.com/armadaproject/armada/internal/common/logging" - armadaslices "github.com/armadaproject/armada/internal/common/slices" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/pkg/armadaevents" -) - -type PulsarJobExpirer struct { - Consumer pulsar.Consumer - JobRepository repository.JobRepository -} - -func (srv *PulsarJobExpirer) Run(ctx *armadacontext.Context) error { - eventChan := srv.Consumer.Chan() - for { - select { - case <-ctx.Done(): - log.Infof("Context expired, stopping job details expiration loop") - return nil - case msg, ok := <-eventChan: - if !ok { - log.Infof("Channel closing, stopping job details expiration loop") - return nil - } - // Unmarshal and validate the message. - sequence, err := eventutil.UnmarshalEventSequence(ctx, msg.Payload()) - if err == nil { - errExpiring := srv.handlePulsarSchedulerEventSequence(ctx, sequence) - if errExpiring != nil { - logging.WithStacktrace(ctx, err).Warnf("Could not expire PulsarJobDetails; ignoring") - } - } else { - logging.WithStacktrace(ctx, err).Warnf("Could not unmarshall event sequenbce; ignoring") - } - srv.ack(ctx, msg) - } - } -} - -func (srv *PulsarJobExpirer) handlePulsarSchedulerEventSequence(ctx *armadacontext.Context, sequence *armadaevents.EventSequence) error { - idsOfJobsToExpireMappingFor := make([]string, 0) - for _, event := range sequence.GetEvents() { - var jobId string - var err error - switch e := event.Event.(type) { - case *armadaevents.EventSequence_Event_JobSucceeded: - jobId, err = armadaevents.UlidStringFromProtoUuid(e.JobSucceeded.JobId) - case *armadaevents.EventSequence_Event_JobErrors: - if ok := armadaslices.AnyFunc(e.JobErrors.Errors, func(e *armadaevents.Error) bool { return e.Terminal }); ok { - jobId, err = armadaevents.UlidStringFromProtoUuid(e.JobErrors.JobId) - } - case *armadaevents.EventSequence_Event_CancelledJob: - jobId, err = armadaevents.UlidStringFromProtoUuid(e.CancelledJob.JobId) - default: - // Non-terminal event - continue - } - if err != nil { - logging.WithStacktrace(ctx, err).Warnf("failed to determine jobId from event of type %T; ignoring", event.Event) - continue - } - idsOfJobsToExpireMappingFor = append(idsOfJobsToExpireMappingFor, jobId) - } - return srv.JobRepository.ExpirePulsarSchedulerJobDetails(ctx, idsOfJobsToExpireMappingFor) -} - -func (srv *PulsarJobExpirer) ack(ctx *armadacontext.Context, msg pulsar.Message) { - util.RetryUntilSuccess( - ctx, - func() error { - return srv.Consumer.Ack(msg) - }, - func(err error) { - log.WithError(err).Warnf("Error acking pulsar message") - time.Sleep(time.Second) - }, - ) -} diff --git a/internal/armada/mocks/generate.go b/internal/armada/mocks/generate.go index 8a53285bda3..cf4cfba22d3 100644 --- a/internal/armada/mocks/generate.go +++ b/internal/armada/mocks/generate.go @@ -1,7 +1,6 @@ package mocks // Mock implementations used by tests -//go:generate mockgen -destination=./mock_submitchecker.go -package=mocks "github.com/armadaproject/armada/internal/scheduler" SubmitScheduleChecker -//go:generate mockgen -destination=./mock_respository.go -package=mocks "github.com/armadaproject/armada/internal/armada/repository" QueueRepository,JobRepository +//go:generate mockgen 
-destination=./mock_repository.go -package=mocks "github.com/armadaproject/armada/internal/armada/queue" QueueRepository //go:generate mockgen -destination=./mock_deduplicator.go -package=mocks "github.com/armadaproject/armada/internal/armada/submit" Deduplicator,Publisher //go:generate mockgen -destination=./mock_authorizer.go -package=mocks "github.com/armadaproject/armada/internal/armada/server" ActionAuthorizer diff --git a/internal/scheduler/mocks/queue_repository.go b/internal/armada/mocks/mock_repository.go similarity index 95% rename from internal/scheduler/mocks/queue_repository.go rename to internal/armada/mocks/mock_repository.go index e2589ee3d96..d908290bbe8 100644 --- a/internal/scheduler/mocks/queue_repository.go +++ b/internal/armada/mocks/mock_repository.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/armadaproject/armada/internal/armada/repository (interfaces: QueueRepository) +// Source: github.com/armadaproject/armada/internal/armada/queue (interfaces: QueueRepository) -// Package schedulermocks is a generated GoMock package. -package schedulermocks +// Package mocks is a generated GoMock package. +package mocks import ( reflect "reflect" diff --git a/internal/armada/mocks/mock_respository.go b/internal/armada/mocks/mock_respository.go deleted file mode 100644 index 9915b4c4f75..00000000000 --- a/internal/armada/mocks/mock_respository.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/armadaproject/armada/internal/armada/repository (interfaces: QueueRepository,JobRepository) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - armadacontext "github.com/armadaproject/armada/internal/common/armadacontext" - schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - queue "github.com/armadaproject/armada/pkg/client/queue" - gomock "github.com/golang/mock/gomock" -) - -// MockQueueRepository is a mock of QueueRepository interface. -type MockQueueRepository struct { - ctrl *gomock.Controller - recorder *MockQueueRepositoryMockRecorder -} - -// MockQueueRepositoryMockRecorder is the mock recorder for MockQueueRepository. -type MockQueueRepositoryMockRecorder struct { - mock *MockQueueRepository -} - -// NewMockQueueRepository creates a new mock instance. -func NewMockQueueRepository(ctrl *gomock.Controller) *MockQueueRepository { - mock := &MockQueueRepository{ctrl: ctrl} - mock.recorder = &MockQueueRepositoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockQueueRepository) EXPECT() *MockQueueRepositoryMockRecorder { - return m.recorder -} - -// CreateQueue mocks base method. -func (m *MockQueueRepository) CreateQueue(arg0 *armadacontext.Context, arg1 queue.Queue) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateQueue", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreateQueue indicates an expected call of CreateQueue. -func (mr *MockQueueRepositoryMockRecorder) CreateQueue(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateQueue", reflect.TypeOf((*MockQueueRepository)(nil).CreateQueue), arg0, arg1) -} - -// DeleteQueue mocks base method. 
-func (m *MockQueueRepository) DeleteQueue(arg0 *armadacontext.Context, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteQueue", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteQueue indicates an expected call of DeleteQueue. -func (mr *MockQueueRepositoryMockRecorder) DeleteQueue(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteQueue", reflect.TypeOf((*MockQueueRepository)(nil).DeleteQueue), arg0, arg1) -} - -// GetAllQueues mocks base method. -func (m *MockQueueRepository) GetAllQueues(arg0 *armadacontext.Context) ([]queue.Queue, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllQueues", arg0) - ret0, _ := ret[0].([]queue.Queue) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllQueues indicates an expected call of GetAllQueues. -func (mr *MockQueueRepositoryMockRecorder) GetAllQueues(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllQueues", reflect.TypeOf((*MockQueueRepository)(nil).GetAllQueues), arg0) -} - -// GetQueue mocks base method. -func (m *MockQueueRepository) GetQueue(arg0 *armadacontext.Context, arg1 string) (queue.Queue, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetQueue", arg0, arg1) - ret0, _ := ret[0].(queue.Queue) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetQueue indicates an expected call of GetQueue. -func (mr *MockQueueRepositoryMockRecorder) GetQueue(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueue", reflect.TypeOf((*MockQueueRepository)(nil).GetQueue), arg0, arg1) -} - -// UpdateQueue mocks base method. -func (m *MockQueueRepository) UpdateQueue(arg0 *armadacontext.Context, arg1 queue.Queue) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateQueue", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateQueue indicates an expected call of UpdateQueue. -func (mr *MockQueueRepositoryMockRecorder) UpdateQueue(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateQueue", reflect.TypeOf((*MockQueueRepository)(nil).UpdateQueue), arg0, arg1) -} - -// MockJobRepository is a mock of JobRepository interface. -type MockJobRepository struct { - ctrl *gomock.Controller - recorder *MockJobRepositoryMockRecorder -} - -// MockJobRepositoryMockRecorder is the mock recorder for MockJobRepository. -type MockJobRepositoryMockRecorder struct { - mock *MockJobRepository -} - -// NewMockJobRepository creates a new mock instance. -func NewMockJobRepository(ctrl *gomock.Controller) *MockJobRepository { - mock := &MockJobRepository{ctrl: ctrl} - mock.recorder = &MockJobRepositoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockJobRepository) EXPECT() *MockJobRepositoryMockRecorder { - return m.recorder -} - -// ExpirePulsarSchedulerJobDetails mocks base method. -func (m *MockJobRepository) ExpirePulsarSchedulerJobDetails(arg0 *armadacontext.Context, arg1 []string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExpirePulsarSchedulerJobDetails", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ExpirePulsarSchedulerJobDetails indicates an expected call of ExpirePulsarSchedulerJobDetails. 
-func (mr *MockJobRepositoryMockRecorder) ExpirePulsarSchedulerJobDetails(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpirePulsarSchedulerJobDetails", reflect.TypeOf((*MockJobRepository)(nil).ExpirePulsarSchedulerJobDetails), arg0, arg1) -} - -// GetPulsarSchedulerJobDetails mocks base method. -func (m *MockJobRepository) GetPulsarSchedulerJobDetails(arg0 *armadacontext.Context, arg1 string) (*schedulerobjects.PulsarSchedulerJobDetails, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPulsarSchedulerJobDetails", arg0, arg1) - ret0, _ := ret[0].(*schedulerobjects.PulsarSchedulerJobDetails) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPulsarSchedulerJobDetails indicates an expected call of GetPulsarSchedulerJobDetails. -func (mr *MockJobRepositoryMockRecorder) GetPulsarSchedulerJobDetails(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPulsarSchedulerJobDetails", reflect.TypeOf((*MockJobRepository)(nil).GetPulsarSchedulerJobDetails), arg0, arg1) -} - -// StorePulsarSchedulerJobDetails mocks base method. -func (m *MockJobRepository) StorePulsarSchedulerJobDetails(arg0 *armadacontext.Context, arg1 []*schedulerobjects.PulsarSchedulerJobDetails) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorePulsarSchedulerJobDetails", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// StorePulsarSchedulerJobDetails indicates an expected call of StorePulsarSchedulerJobDetails. -func (mr *MockJobRepositoryMockRecorder) StorePulsarSchedulerJobDetails(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorePulsarSchedulerJobDetails", reflect.TypeOf((*MockJobRepository)(nil).StorePulsarSchedulerJobDetails), arg0, arg1) -} diff --git a/internal/armada/mocks/mock_submitchecker.go b/internal/armada/mocks/mock_submitchecker.go deleted file mode 100644 index 442280dc0e2..00000000000 --- a/internal/armada/mocks/mock_submitchecker.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/armadaproject/armada/internal/scheduler (interfaces: SubmitScheduleChecker) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - jobdb "github.com/armadaproject/armada/internal/scheduler/jobdb" - armadaevents "github.com/armadaproject/armada/pkg/armadaevents" - gomock "github.com/golang/mock/gomock" -) - -// MockSubmitScheduleChecker is a mock of SubmitScheduleChecker interface. -type MockSubmitScheduleChecker struct { - ctrl *gomock.Controller - recorder *MockSubmitScheduleCheckerMockRecorder -} - -// MockSubmitScheduleCheckerMockRecorder is the mock recorder for MockSubmitScheduleChecker. -type MockSubmitScheduleCheckerMockRecorder struct { - mock *MockSubmitScheduleChecker -} - -// NewMockSubmitScheduleChecker creates a new mock instance. -func NewMockSubmitScheduleChecker(ctrl *gomock.Controller) *MockSubmitScheduleChecker { - mock := &MockSubmitScheduleChecker{ctrl: ctrl} - mock.recorder = &MockSubmitScheduleCheckerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSubmitScheduleChecker) EXPECT() *MockSubmitScheduleCheckerMockRecorder { - return m.recorder -} - -// CheckApiJobs mocks base method. 
-func (m *MockSubmitScheduleChecker) CheckApiJobs(arg0 *armadaevents.EventSequence, arg1 string) (bool, string) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "CheckApiJobs", arg0, arg1)
-	ret0, _ := ret[0].(bool)
-	ret1, _ := ret[1].(string)
-	return ret0, ret1
-}
-
-// CheckApiJobs indicates an expected call of CheckApiJobs.
-func (mr *MockSubmitScheduleCheckerMockRecorder) CheckApiJobs(arg0, arg1 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckApiJobs", reflect.TypeOf((*MockSubmitScheduleChecker)(nil).CheckApiJobs), arg0, arg1)
-}
-
-// CheckJobDbJobs mocks base method.
-func (m *MockSubmitScheduleChecker) CheckJobDbJobs(arg0 []*jobdb.Job) (bool, string) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "CheckJobDbJobs", arg0)
-	ret0, _ := ret[0].(bool)
-	ret1, _ := ret[1].(string)
-	return ret0, ret1
-}
-
-// CheckJobDbJobs indicates an expected call of CheckJobDbJobs.
-func (mr *MockSubmitScheduleCheckerMockRecorder) CheckJobDbJobs(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckJobDbJobs", reflect.TypeOf((*MockSubmitScheduleChecker)(nil).CheckJobDbJobs), arg0)
-}
diff --git a/internal/armada/queue/queue_cache.go b/internal/armada/queue/queue_cache.go
new file mode 100644
index 00000000000..39e06db3bd9
--- /dev/null
+++ b/internal/armada/queue/queue_cache.go
@@ -0,0 +1,81 @@
+package queue
+
+import (
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/exp/maps"
+
+	"github.com/armadaproject/armada/internal/common/armadacontext"
+	"github.com/armadaproject/armada/pkg/client/queue"
+)
+
+// CachedQueueRepository is an implementation of ReadOnlyQueueRepository that fetches queues periodically and caches them.
+// This means the queue information may be slightly out of date, but it allows API operations to continue even if the
+// underlying queue repository is unavailable.
+type CachedQueueRepository struct {
+	updateFrequency time.Duration
+	underlyingRepo  QueueRepository
+	queues          atomic.Pointer[map[string]queue.Queue]
+}
+
+func NewCachedQueueRepository(underlyingRepo QueueRepository, updateFrequency time.Duration) *CachedQueueRepository {
+	return &CachedQueueRepository{
+		updateFrequency: updateFrequency,
+		underlyingRepo:  underlyingRepo,
+		queues:          atomic.Pointer[map[string]queue.Queue]{},
+	}
+}
+
+func (c *CachedQueueRepository) Run(ctx *armadacontext.Context) error {
+	if err := c.fetchQueues(ctx); err != nil {
+		ctx.Warnf("Error fetching queues: %v", err)
+	}
+	ticker := time.NewTicker(c.updateFrequency)
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-ticker.C:
+			if err := c.fetchQueues(ctx); err != nil {
+				ctx.Warnf("Error fetching queues: %v", err)
+			}
+		}
+	}
+}
+
+func (c *CachedQueueRepository) GetQueue(_ *armadacontext.Context, name string) (queue.Queue, error) {
+	queues := *(c.queues.Load())
+	if queues == nil {
+		return queue.Queue{}, &ErrQueueNotFound{QueueName: name}
+	}
+	resolvedQueue, ok := queues[name]
+	if !ok {
+		return queue.Queue{}, &ErrQueueNotFound{QueueName: name}
+	}
+	return resolvedQueue, nil
+}
+
+func (c *CachedQueueRepository) GetAllQueues(_ *armadacontext.Context) ([]queue.Queue, error) {
+	queues := c.queues.Load()
+	if queues == nil {
+		return nil, fmt.Errorf("no queues available")
+	}
+	return maps.Values(*queues), nil
+}
+
+func (c *CachedQueueRepository) fetchQueues(ctx *armadacontext.Context) error {
+	start := time.Now()
+	queues, err := c.underlyingRepo.GetAllQueues(ctx)
+	if err != nil {
+		return err
+	}
+	queuesByName
:= make(map[string]queue.Queue, len(queues)) + for i := 0; i < len(queues); i++ { + queuesByName[queues[i].Name] = queues[i] + } + c.queues.Store(&queuesByName) + ctx.Infof("Refreshed Queues in %s", time.Since(start)) + return nil +} diff --git a/internal/armada/queue/queue_cache_test.go b/internal/armada/queue/queue_cache_test.go new file mode 100644 index 00000000000..70dcf485d08 --- /dev/null +++ b/internal/armada/queue/queue_cache_test.go @@ -0,0 +1,66 @@ +package queue + +import ( + "sort" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/armadaproject/armada/internal/armada/mocks" + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/pkg/client/queue" +) + +func TestFetch(t *testing.T) { + tests := map[string]struct { + queues []queue.Queue + }{ + "No Queues": { + queues: []queue.Queue{}, + }, + "One Queue": { + queues: []queue.Queue{{Name: "testQueue1"}}, + }, + "Many Queues": { + queues: []queue.Queue{ + {Name: "testQueue1", PriorityFactor: 1}, + {Name: "testQueue2", PriorityFactor: 2}, + {Name: "testQueue3", PriorityFactor: 3}, + {Name: "testQueue4", PriorityFactor: 4}, + {Name: "testQueue5", PriorityFactor: 5}, + }, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + ctrl := gomock.NewController(t) + mockRepo := mocks.NewMockQueueRepository(ctrl) + mockRepo.EXPECT().GetAllQueues(ctx).Return(tc.queues, nil).Times(1) + + cache := NewCachedQueueRepository(mockRepo, 1*time.Millisecond) + fetchErr := cache.fetchQueues(ctx) + + // Assert that getting all queues worked as expected + queues, getErr := cache.GetAllQueues(ctx) + require.NoError(t, fetchErr) + sort.Slice(queues, func(i, j int) bool { + return queues[i].Name < queues[j].Name + }) + assert.NoError(t, getErr) + assert.Equal(t, tc.queues, queues) + + // Assert that all queues can be fetched individually + for _, expectedQueue := range tc.queues { + actualQueue, err := cache.GetQueue(ctx, expectedQueue.Name) + require.NoError(t, err) + assert.Equal(t, expectedQueue, actualQueue) + } + ctrl.Finish() + cancel() + }) + } +} diff --git a/internal/armada/queue/queue_repository.go b/internal/armada/queue/queue_repository.go new file mode 100644 index 00000000000..7224e656028 --- /dev/null +++ b/internal/armada/queue/queue_repository.go @@ -0,0 +1,143 @@ +package queue + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/pkg/errors" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/pkg/api" + "github.com/armadaproject/armada/pkg/client/queue" +) + +type ErrQueueNotFound struct { + QueueName string +} + +func (err *ErrQueueNotFound) Error() string { + return fmt.Sprintf("could not find queue %q", err.QueueName) +} + +type ErrQueueAlreadyExists struct { + QueueName string +} + +func (err *ErrQueueAlreadyExists) Error() string { + return fmt.Sprintf("queue %s already exists", err.QueueName) +} + +type QueueRepository interface { + GetAllQueues(ctx *armadacontext.Context) ([]queue.Queue, error) + GetQueue(ctx *armadacontext.Context, name string) (queue.Queue, error) + CreateQueue(*armadacontext.Context, queue.Queue) error + UpdateQueue(*armadacontext.Context, queue.Queue) error + DeleteQueue(ctx *armadacontext.Context, name string) error +} + +type 
ReadOnlyQueueRepository interface { + GetAllQueues(ctx *armadacontext.Context) ([]queue.Queue, error) + GetQueue(ctx *armadacontext.Context, name string) (queue.Queue, error) +} + +type PostgresQueueRepository struct { + // pool of database connections + db *pgxpool.Pool +} + +func NewPostgresQueueRepository(db *pgxpool.Pool) *PostgresQueueRepository { + return &PostgresQueueRepository{db: db} +} + +func (r *PostgresQueueRepository) GetAllQueues(ctx *armadacontext.Context) ([]queue.Queue, error) { + rows, err := r.db.Query(ctx, "SELECT definition FROM queue") + if err != nil { + return nil, errors.WithStack(err) + } + + defer rows.Close() + + queues := make([]queue.Queue, 0) + for rows.Next() { + var definitionBytes []byte + err := rows.Scan(&definitionBytes) + if err != nil { + return nil, errors.WithStack(err) + } + q, err := r.unmarshalQueue(definitionBytes) + if err != nil { + return nil, errors.WithStack(err) + } + queues = append(queues, q) + } + + if err := rows.Err(); err != nil { + return nil, errors.WithStack(err) + } + return queues, nil +} + +func (r *PostgresQueueRepository) GetQueue(ctx *armadacontext.Context, name string) (queue.Queue, error) { + var definitionBytes []byte + query := "SELECT definition FROM queue WHERE name = $1" + + err := r.db.QueryRow(ctx, query, name).Scan(&definitionBytes) + if err != nil { + q := queue.Queue{} + if errors.Is(err, pgx.ErrNoRows) { + return q, &ErrQueueNotFound{QueueName: name} + } + return q, errors.WithStack(err) + } + + q, err := r.unmarshalQueue(definitionBytes) + if err != nil { + return queue.Queue{}, errors.WithStack(err) + } + return q, nil +} + +func (r *PostgresQueueRepository) CreateQueue(ctx *armadacontext.Context, queue queue.Queue) error { + return r.upsertQueue(ctx, queue) +} + +func (r *PostgresQueueRepository) UpdateQueue(ctx *armadacontext.Context, queue queue.Queue) error { + return r.upsertQueue(ctx, queue) +} + +func (r *PostgresQueueRepository) DeleteQueue(ctx *armadacontext.Context, name string) error { + query := "DELETE FROM queue WHERE name = $1" + _, err := r.db.Exec(ctx, query, name) + if err != nil { + return errors.WithStack(err) + } + return nil +} + +func (r *PostgresQueueRepository) upsertQueue(ctx *armadacontext.Context, queue queue.Queue) error { + data, err := proto.Marshal(queue.ToAPI()) + if err != nil { + return errors.WithStack(err) + } + + query := "INSERT INTO queue (name, definition) VALUES ($1, $2) ON CONFLICT(name) DO UPDATE SET definition = EXCLUDED.definition" + _, err = r.db.Exec(ctx, query, queue.Name, data) + if err != nil { + return errors.WithStack(err) + } + return nil +} + +func (r *PostgresQueueRepository) unmarshalQueue(definitionBytes []byte) (queue.Queue, error) { + apiQueue := &api.Queue{} + if err := proto.Unmarshal(definitionBytes, apiQueue); err != nil { + return queue.Queue{}, err + } + q, err := queue.NewQueue(apiQueue) + if err != nil { + return queue.Queue{}, err + } + return q, nil +} diff --git a/internal/armada/queue/queue_repository_test.go b/internal/armada/queue/queue_repository_test.go new file mode 100644 index 00000000000..0ab964a2abe --- /dev/null +++ b/internal/armada/queue/queue_repository_test.go @@ -0,0 +1,148 @@ +package queue + +import ( + "testing" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/database/lookout" + 
"github.com/armadaproject/armada/pkg/client/queue" +) + +var ( + queueA = queue.Queue{ + Name: "queueA", + PriorityFactor: 1000, + Permissions: []queue.Permissions{}, + } + queueB = queue.Queue{ + Name: "queueB", + PriorityFactor: 2000, + Permissions: []queue.Permissions{}, + } + twoQueues = []queue.Queue{queueA, queueB} +) + +func TestGetAllQueues(t *testing.T) { + tests := map[string]struct { + queues []queue.Queue + }{ + "Empty Database": { + queues: []queue.Queue{}, + }, + "One Queue": { + queues: []queue.Queue{queueA}, + }, + "Two Queues": { + queues: []queue.Queue{queueA, queueB}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + repo := NewPostgresQueueRepository(db) + for _, q := range tc.queues { + err := repo.CreateQueue(ctx, q) + require.NoError(t, err) + } + fetched, err := repo.GetAllQueues(ctx) + assert.NoError(t, err) + assert.Equal(t, tc.queues, fetched) + return nil + }) + assert.NoError(t, err) + cancel() + }) + } +} + +func TestDeleteQueue(t *testing.T) { + tests := map[string]struct { + intialQueues []queue.Queue + queueToDelete string + }{ + "Empty Database": { + queueToDelete: "queueA", + }, + "QueueNot present": { + intialQueues: twoQueues, + queueToDelete: "queueC", + }, + "Delete Queue": { + intialQueues: twoQueues, + queueToDelete: "queueA", + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + repo := NewPostgresQueueRepository(db) + for _, q := range tc.intialQueues { + err := repo.CreateQueue(ctx, q) + require.NoError(t, err) + } + err := repo.DeleteQueue(ctx, tc.queueToDelete) + require.NoError(t, err) + + _, err = repo.GetQueue(ctx, tc.queueToDelete) + assert.Equal(t, &ErrQueueNotFound{QueueName: tc.queueToDelete}, err) + return nil + }) + assert.NoError(t, err) + cancel() + }) + } +} + +func TestGetAndUpdateQueue(t *testing.T) { + tests := map[string]struct { + intialQueues []queue.Queue + queueToUpdate queue.Queue + }{ + "Empty Database": { + queueToUpdate: queueA, + }, + "Queue Doesn't Exist": { + intialQueues: twoQueues, + queueToUpdate: queue.Queue{ + Name: "queueC", + PriorityFactor: 1, + Permissions: []queue.Permissions{}, + }, + }, + "Queue Does Exist": { + intialQueues: twoQueues, + queueToUpdate: queue.Queue{ + Name: "queueA", + PriorityFactor: queueA.PriorityFactor + 100, + Permissions: []queue.Permissions{}, + }, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + repo := NewPostgresQueueRepository(db) + for _, q := range tc.intialQueues { + err := repo.CreateQueue(ctx, q) + require.NoError(t, err) + } + err := repo.UpdateQueue(ctx, tc.queueToUpdate) + assert.NoError(t, err) + fetched, err := repo.GetQueue(ctx, tc.queueToUpdate.Name) + require.NoError(t, err) + assert.Equal(t, tc.queueToUpdate, fetched) + return nil + }) + assert.NoError(t, err) + cancel() + }) + } +} diff --git a/internal/armada/queue/queue_service.go b/internal/armada/queue/queue_service.go new file mode 100644 index 00000000000..65cad8a6977 --- /dev/null +++ b/internal/armada/queue/queue_service.go @@ -0,0 +1,191 @@ +package queue + +import ( + "context" + "math" + 
+ "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/armadaproject/armada/internal/armada/permissions" + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/armadaerrors" + "github.com/armadaproject/armada/internal/common/auth" + "github.com/armadaproject/armada/pkg/api" + "github.com/armadaproject/armada/pkg/client/queue" +) + +type Server struct { + queueRepository QueueRepository + authorizer auth.ActionAuthorizer +} + +func NewServer( + queueRepository QueueRepository, + authorizer auth.ActionAuthorizer, +) *Server { + return &Server{ + queueRepository: queueRepository, + authorizer: authorizer, + } +} + +func (s *Server) CreateQueue(grpcCtx context.Context, req *api.Queue) (*types.Empty, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) + err := s.authorizer.AuthorizeAction(ctx, permissions.CreateQueue) + var ep *armadaerrors.ErrUnauthorized + if errors.As(err, &ep) { + return nil, status.Errorf(codes.PermissionDenied, "[CreateQueue] error creating queue %s: %s", req.Name, ep) + } else if err != nil { + return nil, status.Errorf(codes.Unavailable, "[CreateQueue] error checking permissions: %s", err) + } + + if len(req.UserOwners) == 0 { + principal := auth.GetPrincipal(ctx) + req.UserOwners = []string{principal.GetName()} + } + + queue, err := queue.NewQueue(req) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "[CreateQueue] error validating queue: %s", err) + } + + err = s.queueRepository.CreateQueue(ctx, queue) + var eq *ErrQueueAlreadyExists + if errors.As(err, &eq) { + return nil, status.Errorf(codes.AlreadyExists, "[CreateQueue] error creating queue: %s", err) + } else if err != nil { + return nil, status.Errorf(codes.Unavailable, "[CreateQueue] error creating queue: %s", err) + } + + return &types.Empty{}, nil +} + +func (s *Server) CreateQueues(grpcCtx context.Context, req *api.QueueList) (*api.BatchQueueCreateResponse, error) { + ctx := armadacontext.FromGrpcCtx(grpcCtx) + var failedQueues []*api.QueueCreateResponse + // Create a queue for each element of the request body and return the failures. 
+	for _, queue := range req.Queues {
+		_, err := s.CreateQueue(ctx, queue)
+		if err != nil {
+			failedQueues = append(failedQueues, &api.QueueCreateResponse{
+				Queue: queue,
+				Error: err.Error(),
+			})
+		}
+	}
+
+	return &api.BatchQueueCreateResponse{
+		FailedQueues: failedQueues,
+	}, nil
+}
+
+func (s *Server) UpdateQueue(grpcCtx context.Context, req *api.Queue) (*types.Empty, error) {
+	ctx := armadacontext.FromGrpcCtx(grpcCtx)
+	err := s.authorizer.AuthorizeAction(ctx, permissions.CreateQueue)
+	var ep *armadaerrors.ErrUnauthorized
+	if errors.As(err, &ep) {
+		return nil, status.Errorf(codes.PermissionDenied, "[UpdateQueue] error updating queue %s: %s", req.Name, ep)
+	} else if err != nil {
+		return nil, status.Errorf(codes.Unavailable, "[UpdateQueue] error checking permissions: %s", err)
+	}
+
+	queue, err := queue.NewQueue(req)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "[UpdateQueue] error: %s", err)
+	}
+
+	err = s.queueRepository.UpdateQueue(ctx, queue)
+	var e *ErrQueueNotFound
+	if errors.As(err, &e) {
+		return nil, status.Errorf(codes.NotFound, "[UpdateQueue] error: %s", err)
+	} else if err != nil {
+		return nil, status.Errorf(codes.Unavailable, "[UpdateQueue] error getting queue %q: %s", queue.Name, err)
+	}
+
+	return &types.Empty{}, nil
+}
+
+func (s *Server) UpdateQueues(grpcCtx context.Context, req *api.QueueList) (*api.BatchQueueUpdateResponse, error) {
+	ctx := armadacontext.FromGrpcCtx(grpcCtx)
+	var failedQueues []*api.QueueUpdateResponse
+
+	// Update each queue in the request body and collect any failures.
+	for _, queue := range req.Queues {
+		_, err := s.UpdateQueue(ctx, queue)
+		if err != nil {
+			failedQueues = append(failedQueues, &api.QueueUpdateResponse{
+				Queue: queue,
+				Error: err.Error(),
+			})
+		}
+	}
+
+	return &api.BatchQueueUpdateResponse{
+		FailedQueues: failedQueues,
+	}, nil
+}
+
+func (s *Server) DeleteQueue(grpcCtx context.Context, req *api.QueueDeleteRequest) (*types.Empty, error) {
+	ctx := armadacontext.FromGrpcCtx(grpcCtx)
+	err := s.authorizer.AuthorizeAction(ctx, permissions.DeleteQueue)
+	var ep *armadaerrors.ErrUnauthorized
+	if errors.As(err, &ep) {
+		return nil, status.Errorf(codes.PermissionDenied, "[DeleteQueue] error deleting queue %s: %s", req.Name, ep)
+	} else if err != nil {
+		return nil, status.Errorf(codes.Unavailable, "[DeleteQueue] error checking permissions: %s", err)
+	}
+	err = s.queueRepository.DeleteQueue(ctx, req.Name)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "[DeleteQueue] error deleting queue %s: %s", req.Name, err)
+	}
+	return &types.Empty{}, nil
+}
+
+func (s *Server) GetQueue(grpcCtx context.Context, req *api.QueueGetRequest) (*api.Queue, error) {
+	ctx := armadacontext.FromGrpcCtx(grpcCtx)
+	queue, err := s.queueRepository.GetQueue(ctx, req.Name)
+	var e *ErrQueueNotFound
+	if errors.As(err, &e) {
+		return nil, status.Errorf(codes.NotFound, "[GetQueue] error: %s", err)
+	} else if err != nil {
+		return nil, status.Errorf(codes.Unavailable, "[GetQueue] error getting queue %q: %s", req.Name, err)
+	}
+	return queue.ToAPI(), nil
+}
+
+func (s *Server) GetQueues(req *api.StreamingQueueGetRequest, stream api.QueueService_GetQueuesServer) error {
+	ctx := armadacontext.FromGrpcCtx(stream.Context())
+
+	// Determine how many queues to return; anything less than one means return them all.
+	numToReturn := req.GetNum()
+	if numToReturn < 1 {
+		numToReturn = math.MaxUint32
+	}
+
+	queues, err := s.queueRepository.GetAllQueues(ctx)
+	if err != nil {
+		return err
+	}
+	for i, 
queue := range queues { + if uint32(i) < numToReturn { + err := stream.Send(&api.StreamingQueueMessage{ + Event: &api.StreamingQueueMessage_Queue{Queue: queue.ToAPI()}, + }) + if err != nil { + return err + } + } + } + err = stream.Send(&api.StreamingQueueMessage{ + Event: &api.StreamingQueueMessage_End{ + End: &api.EndMarker{}, + }, + }) + if err != nil { + return err + } + return nil +} diff --git a/internal/armada/repository/health_redis.go b/internal/armada/repository/health_redis.go deleted file mode 100644 index 2782ef1648b..00000000000 --- a/internal/armada/repository/health_redis.go +++ /dev/null @@ -1,25 +0,0 @@ -package repository - -import ( - "context" - "fmt" - - "github.com/redis/go-redis/v9" -) - -type RedisHealth struct { - db redis.UniversalClient -} - -func NewRedisHealth(db redis.UniversalClient) *RedisHealth { - return &RedisHealth{db: db} -} - -func (r *RedisHealth) Check() error { - _, err := r.db.Ping(context.Background()).Result() - if err == nil { - return nil - } else { - return fmt.Errorf("[RedisHealth.Check] error: %s", err) - } -} diff --git a/internal/armada/repository/job.go b/internal/armada/repository/job.go deleted file mode 100644 index e66e827d391..00000000000 --- a/internal/armada/repository/job.go +++ /dev/null @@ -1,85 +0,0 @@ -package repository - -import ( - "fmt" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/redis/go-redis/v9" - - "github.com/armadaproject/armada/internal/common/armadacontext" - protoutil "github.com/armadaproject/armada/internal/common/proto" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" -) - -const ( - pulsarJobPrefix = "PulsarJob:" // {jobId} - pulsarjob protobuf object -) - -type JobRepository interface { - StorePulsarSchedulerJobDetails(ctx *armadacontext.Context, jobDetails []*schedulerobjects.PulsarSchedulerJobDetails) error - GetPulsarSchedulerJobDetails(ctx *armadacontext.Context, jobIds string) (*schedulerobjects.PulsarSchedulerJobDetails, error) - ExpirePulsarSchedulerJobDetails(ctx *armadacontext.Context, jobId []string) error -} - -type RedisJobRepository struct { - db redis.UniversalClient -} - -func NewRedisJobRepository( - db redis.UniversalClient, -) *RedisJobRepository { - return &RedisJobRepository{db: db} -} - -func (repo *RedisJobRepository) StorePulsarSchedulerJobDetails(ctx *armadacontext.Context, jobDetails []*schedulerobjects.PulsarSchedulerJobDetails) error { - pipe := repo.db.Pipeline() - for _, job := range jobDetails { - key := fmt.Sprintf("%s%s", pulsarJobPrefix, job.JobId) - jobData, err := proto.Marshal(job) - if err != nil { - return errors.WithStack(err) - } - pipe.Set(ctx, key, jobData, 375*24*time.Hour) // expire after a year - } - _, err := pipe.Exec(ctx) - if err != nil { - return errors.Wrapf(err, "error storing pulsar job details in redis") - } - return nil -} - -func (repo *RedisJobRepository) GetPulsarSchedulerJobDetails(ctx *armadacontext.Context, jobId string) (*schedulerobjects.PulsarSchedulerJobDetails, error) { - cmd := repo.db.Get(ctx, pulsarJobPrefix+jobId) - - bytes, err := cmd.Bytes() - if err != nil && err != redis.Nil { - return nil, errors.Wrapf(err, "Errror retrieving job details for %s in redis", jobId) - } - if err == redis.Nil { - return nil, nil - } - details, err := protoutil.Unmarshall(bytes, &schedulerobjects.PulsarSchedulerJobDetails{}) - if err != nil { - return nil, errors.Wrapf(err, "Errror unmarshalling job details for %s in redis", jobId) - } - - return details, nil -} - -func (repo 
*RedisJobRepository) ExpirePulsarSchedulerJobDetails(ctx *armadacontext.Context, jobIds []string) error { - if len(jobIds) == 0 { - return nil - } - pipe := repo.db.Pipeline() - for _, jobId := range jobIds { - key := fmt.Sprintf("%s%s", pulsarJobPrefix, jobId) - // Expire as opposed to delete so that we are permissive of race conditions. - pipe.Expire(ctx, key, 1*time.Hour) - } - if _, err := pipe.Exec(ctx); err != nil { - return errors.Wrap(err, "failed to delete pulsar job details in Redis") - } - return nil -} diff --git a/internal/armada/repository/job_test.go b/internal/armada/repository/job_test.go deleted file mode 100644 index 80f4db5a413..00000000000 --- a/internal/armada/repository/job_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package repository - -import ( - "testing" - "time" - - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" -) - -func TestStoreAndGetPulsarSchedulerJobDetails(t *testing.T) { - ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) - defer cancel() - withRepository(ctx, func(r *RedisJobRepository) { - details := &schedulerobjects.PulsarSchedulerJobDetails{ - JobId: util.NewULID(), - Queue: "testQueue", - JobSet: "testJobset", - } - err := r.StorePulsarSchedulerJobDetails(ctx, []*schedulerobjects.PulsarSchedulerJobDetails{details}) - require.NoError(t, err) - - retrievedDetails, err := r.GetPulsarSchedulerJobDetails(ctx, details.JobId) - require.NoError(t, err) - assert.Equal(t, details, retrievedDetails) - - nonExistantDetails, err := r.GetPulsarSchedulerJobDetails(ctx, "not a valid details key") - require.NoError(t, err) - assert.Nil(t, nonExistantDetails) - }) -} - -func withRepository(ctx *armadacontext.Context, action func(r *RedisJobRepository)) { - client := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 10}) - defer client.FlushDB(ctx) - defer client.Close() - client.FlushDB(ctx) - repo := NewRedisJobRepository(client) - action(repo) -} diff --git a/internal/armada/repository/queue.go b/internal/armada/repository/queue.go deleted file mode 100644 index 6213f508dca..00000000000 --- a/internal/armada/repository/queue.go +++ /dev/null @@ -1,138 +0,0 @@ -package repository - -import ( - "fmt" - - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/redis/go-redis/v9" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/client/queue" -) - -const queueHashKey = "Queue" - -type ErrQueueNotFound struct { - QueueName string -} - -func (err *ErrQueueNotFound) Error() string { - return fmt.Sprintf("could not find queue %q", err.QueueName) -} - -type ErrQueueAlreadyExists struct { - QueueName string -} - -func (err *ErrQueueAlreadyExists) Error() string { - return fmt.Sprintf("queue %s already exists", err.QueueName) -} - -type QueueRepository interface { - GetAllQueues(ctx *armadacontext.Context) ([]queue.Queue, error) - GetQueue(ctx *armadacontext.Context, name string) (queue.Queue, error) - CreateQueue(*armadacontext.Context, queue.Queue) error - UpdateQueue(*armadacontext.Context, queue.Queue) error - DeleteQueue(ctx *armadacontext.Context, name string) error -} - -type RedisQueueRepository struct { - db redis.UniversalClient -} - -func 
NewRedisQueueRepository(db redis.UniversalClient) *RedisQueueRepository { - return &RedisQueueRepository{db: db} -} - -func (r *RedisQueueRepository) GetAllQueues(ctx *armadacontext.Context) ([]queue.Queue, error) { - result, err := r.db.HGetAll(ctx, queueHashKey).Result() - if err != nil { - return nil, errors.WithStack(err) - } - - queues := make([]queue.Queue, len(result)) - i := 0 - for _, v := range result { - apiQueue := &api.Queue{} - if err := proto.Unmarshal([]byte(v), apiQueue); err != nil { - return nil, errors.WithStack(err) - } - queue, err := queue.NewQueue(apiQueue) - if err != nil { - return nil, err - } - queues[i] = queue - i++ - } - return queues, nil -} - -func (r *RedisQueueRepository) GetQueue(ctx *armadacontext.Context, name string) (queue.Queue, error) { - result, err := r.db.HGet(ctx, queueHashKey, name).Result() - if err == redis.Nil { - return queue.Queue{}, &ErrQueueNotFound{QueueName: name} - } else if err != nil { - return queue.Queue{}, fmt.Errorf("[RedisQueueRepository.GetQueue] error reading from database: %s", err) - } - - apiQueue := &api.Queue{} - e := proto.Unmarshal([]byte(result), apiQueue) - if e != nil { - return queue.Queue{}, fmt.Errorf("[RedisQueueRepository.GetQueue] error unmarshalling queue: %s", err) - } - - return queue.NewQueue(apiQueue) -} - -func (r *RedisQueueRepository) CreateQueue(ctx *armadacontext.Context, queue queue.Queue) error { - data, err := proto.Marshal(queue.ToAPI()) - if err != nil { - return fmt.Errorf("[RedisQueueRepository.CreateQueue] error marshalling queue: %s", err) - } - - // HSetNX sets a key-value pair if the key doesn't already exist. - // If the key exists, this is a no-op, and result is false. - result, err := r.db.HSetNX(ctx, queueHashKey, queue.Name, data).Result() - if err != nil { - return fmt.Errorf("[RedisQueueRepository.CreateQueue] error writing to database: %s", err) - } - if !result { - return &ErrQueueAlreadyExists{QueueName: queue.Name} - } - - return nil -} - -// TODO If the queue to be updated is deleted between this method checking if the queue exists and -// making the update, the deleted queue is re-added to Redis. There's no "update if exists" -// operation in Redis, so we need to do this with a script or transaction. 
-func (r *RedisQueueRepository) UpdateQueue(ctx *armadacontext.Context, queue queue.Queue) error { - existsResult, err := r.db.HExists(ctx, queueHashKey, queue.Name).Result() - if err != nil { - return fmt.Errorf("[RedisQueueRepository.UpdateQueue] error reading from database: %s", err) - } else if !existsResult { - return &ErrQueueNotFound{QueueName: queue.Name} - } - - data, err := proto.Marshal(queue.ToAPI()) - if err != nil { - return fmt.Errorf("[RedisQueueRepository.UpdateQueue] error marshalling queue: %s", err) - } - - result := r.db.HSet(ctx, queueHashKey, queue.Name, data) - if err := result.Err(); err != nil { - return fmt.Errorf("[RedisQueueRepository.UpdateQueue] error writing to database: %s", err) - } - - return nil -} - -func (r *RedisQueueRepository) DeleteQueue(ctx *armadacontext.Context, name string) error { - result := r.db.HDel(ctx, queueHashKey, name) - if err := result.Err(); err != nil { - return fmt.Errorf("[RedisQueueRepository.DeleteQueue] error deleting queue: %s", err) - } - return nil -} diff --git a/internal/armada/server.go b/internal/armada/server.go index 298a64cb6c8..86fbabfae21 100644 --- a/internal/armada/server.go +++ b/internal/armada/server.go @@ -8,7 +8,6 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/google/uuid" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/redis/go-redis/extra/redisprometheus/v9" @@ -17,21 +16,17 @@ import ( "google.golang.org/grpc" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/armada/event" "github.com/armadaproject/armada/internal/armada/queryapi" - "github.com/armadaproject/armada/internal/armada/repository" - "github.com/armadaproject/armada/internal/armada/server" + "github.com/armadaproject/armada/internal/armada/queue" "github.com/armadaproject/armada/internal/armada/submit" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/auth" - "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database" grpcCommon "github.com/armadaproject/armada/internal/common/grpc" "github.com/armadaproject/armada/internal/common/health" - "github.com/armadaproject/armada/internal/common/pgkeyvalue" "github.com/armadaproject/armada/internal/common/pulsarutils" - "github.com/armadaproject/armada/internal/scheduler" - schedulerdb "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/reports" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/pkg/api" @@ -57,9 +52,6 @@ func Serve(ctx *armadacontext.Context, config *configuration.ArmadaConfig, healt // we add all services to a slice and start them together at the end of this function. 
var services []func() error - if err := validateCancelJobsBatchSizeConfig(config); err != nil { - return err - } if err := validateSubmissionConfig(config.Submission); err != nil { return err } @@ -85,15 +77,17 @@ func Serve(ctx *armadacontext.Context, config *configuration.ArmadaConfig, healt return nil }) - // Setup Redis - db := createRedisClient(&config.Redis) - defer func() { - if err := db.Close(); err != nil { - log.WithError(err).Error("failed to close Redis client") - } - }() - prometheus.MustRegister( - redisprometheus.NewCollector("armada", "redis", db)) + // Create database connection. This is used for the query api, queues and for job deduplication + dbPool, err := database.OpenPgxPool(config.Postgres) + if err != nil { + return errors.WithMessage(err, "error creating postgres pool") + } + defer dbPool.Close() + queryapiServer := queryapi.New( + dbPool, + config.QueryApi.MaxQueryItems, + func() compress.Decompressor { return compress.NewZlibDecompressor() }) + api.RegisterJobsServer(grpcServer, queryapiServer) eventDb := createRedisClient(&config.EventsApiRedis) defer func() { @@ -104,38 +98,21 @@ func Serve(ctx *armadacontext.Context, config *configuration.ArmadaConfig, healt prometheus.MustRegister( redisprometheus.NewCollector("armada", "events_redis", eventDb)) - jobRepository := repository.NewRedisJobRepository(db) - queueRepository := repository.NewRedisQueueRepository(db) - healthChecks.Add(repository.NewRedisHealth(db)) - - eventRepository := repository.NewEventRepository(eventDb) + queueRepository := queue.NewPostgresQueueRepository(dbPool) + queueCache := queue.NewCachedQueueRepository(queueRepository, config.QueueCacheRefreshPeriod) + services = append(services, func() error { + return queueCache.Run(ctx) + }) + eventRepository := event.NewEventRepository(eventDb) - authorizer := server.NewAuthorizer( - authorization.NewPrincipalPermissionChecker( + authorizer := auth.NewAuthorizer( + auth.NewPrincipalPermissionChecker( config.Auth.PermissionGroupMapping, config.Auth.PermissionScopeMapping, config.Auth.PermissionClaimMapping, ), ) - // If pool settings are provided, open a connection pool to be shared by all services. - var dbPool *pgxpool.Pool - dbPool, err = database.OpenPgxPool(config.Postgres) - if err != nil { - return err - } - defer dbPool.Close() - - // Executor Repositories for pulsar scheduler - pulsarExecutorRepo := schedulerdb.NewRedisExecutorRepository(db, "pulsar") - submitChecker := scheduler.NewSubmitChecker( - 30*time.Minute, - config.Scheduling, - pulsarExecutorRepo, - ) - services = append(services, func() error { - return submitChecker.Run(ctx) - }) serverId := uuid.New() var pulsarClient pulsar.Client // API endpoints that generate Pulsar messages. @@ -157,46 +134,16 @@ func Serve(ctx *armadacontext.Context, config *configuration.ArmadaConfig, healt } defer publisher.Close() - // KV store where we Automatically clean up keys after two weeks. 
- store, err := pgkeyvalue.New(ctx, dbPool, config.Pulsar.DedupTable) - if err != nil { - return err - } - services = append(services, func() error { - return store.PeriodicCleanup(ctx, time.Hour, 14*24*time.Hour) - }) + queueServer := queue.NewServer(queueRepository, authorizer) - pulsarSubmitServer := submit.NewServer( + submitServer := submit.NewServer( + queueServer, publisher, - queueRepository, - jobRepository, + queueCache, config.Submission, - submit.NewDeduplicator(store), - submitChecker, + submit.NewDeduplicator(dbPool), authorizer) - // Consumer that's used for deleting pulsarJob details - // Need to use the old config.Pulsar.RedisFromPulsarSubscription name so we continue processing where we left off - // TODO: delete this when we finally remove redis - consumer, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{ - Topic: config.Pulsar.JobsetEventsTopic, - SubscriptionName: config.Pulsar.RedisFromPulsarSubscription, - Type: pulsar.KeyShared, - ReceiverQueueSize: config.Pulsar.ReceiverQueueSize, - }) - if err != nil { - return errors.WithStack(err) - } - defer consumer.Close() - - jobExpirer := &PulsarJobExpirer{ - Consumer: consumer, - JobRepository: jobRepository, - } - services = append(services, func() error { - return jobExpirer.Run(ctx) - }) - schedulerApiConnection, err := createApiConnection(config.SchedulerApiConnection) if err != nil { return errors.Wrapf(err, "error creating connection to scheduler api") @@ -204,27 +151,15 @@ func Serve(ctx *armadacontext.Context, config *configuration.ArmadaConfig, healt schedulerApiReportsClient := schedulerobjects.NewSchedulerReportingClient(schedulerApiConnection) schedulingReportsServer := reports.NewProxyingSchedulingReportsServer(schedulerApiReportsClient) - eventServer := server.NewEventServer( + eventServer := event.NewEventServer( authorizer, eventRepository, - queueRepository, - jobRepository, + queueCache, ) - if config.QueryApi.Enabled { - queryDb, err := database.OpenPgxPool(config.QueryApi.Postgres) - if err != nil { - return errors.WithMessage(err, "error creating QueryApi postgres pool") - } - queryapiServer := queryapi.New( - queryDb, - config.QueryApi.MaxQueryItems, - func() compress.Decompressor { return compress.NewZlibDecompressor() }) - api.RegisterJobsServer(grpcServer, queryapiServer) - } - - api.RegisterSubmitServer(grpcServer, pulsarSubmitServer) + api.RegisterSubmitServer(grpcServer, submitServer) api.RegisterEventServer(grpcServer, eventServer) + api.RegisterQueueServiceServer(grpcServer, queueServer) schedulerobjects.RegisterSchedulerReportingServer(grpcServer, schedulingReportsServer) grpc_prometheus.Register(grpcServer) @@ -254,14 +189,6 @@ func createRedisClient(config *redis.UniversalOptions) redis.UniversalClient { return redis.NewUniversalClient(config) } -// TODO: Is this all validation that needs to be done? -func validateCancelJobsBatchSizeConfig(config *configuration.ArmadaConfig) error { - if config.CancelJobsBatchSize <= 0 { - return errors.WithStack(fmt.Errorf("cancel jobs batch should be greater than 0: is %d", config.CancelJobsBatchSize)) - } - return nil -} - func validateSubmissionConfig(config configuration.SubmissionConfig) error { // Check that the default priority class is allowed to be submitted. 
if config.DefaultPriorityClassName != "" { diff --git a/internal/armada/submit/conversion/conversions.go b/internal/armada/submit/conversion/conversions.go index 854737a3d49..b0dc957e6e4 100644 --- a/internal/armada/submit/conversion/conversions.go +++ b/internal/armada/submit/conversion/conversions.go @@ -26,11 +26,12 @@ func SubmitJobFromApiRequest( ) *armadaevents.SubmitJob { jobId := idGen() jobIdStr := armadaevents.MustUlidStringFromProtoUuid(jobId) - priority := priorityAsInt32(jobReq.GetPriority()) + priority := PriorityAsInt32(jobReq.GetPriority()) ingressesAndServices := convertIngressesAndServices(jobReq, jobIdStr, jobSetId, queue, owner) msg := &armadaevents.SubmitJob{ JobId: jobId, + JobIdStr: jobIdStr, DeduplicationId: jobReq.GetClientId(), Priority: priority, ObjectMeta: &armadaevents.ObjectMeta{ @@ -45,9 +46,8 @@ func SubmitJobFromApiRequest( }, }, }, - Objects: ingressesAndServices, - Scheduler: jobReq.Scheduler, - QueueTtlSeconds: jobReq.QueueTtlSeconds, + Objects: ingressesAndServices, + Scheduler: jobReq.Scheduler, } postProcess(msg, config) return msg @@ -230,7 +230,7 @@ func createIngressFromService( } } -func priorityAsInt32(priority float64) uint32 { +func PriorityAsInt32(priority float64) uint32 { if priority < 0 { priority = 0 } diff --git a/internal/armada/submit/conversion/conversions_test.go b/internal/armada/submit/conversion/conversions_test.go index fa5abd7c29a..81e4212eb3a 100644 --- a/internal/armada/submit/conversion/conversions_test.go +++ b/internal/armada/submit/conversion/conversions_test.go @@ -388,7 +388,7 @@ func TestPriorityAsInt32(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.expectedPriority, priorityAsInt32(tc.priority)) + assert.Equal(t, tc.expectedPriority, PriorityAsInt32(tc.priority)) }) } } diff --git a/internal/armada/submit/conversion/post_process.go b/internal/armada/submit/conversion/post_process.go index 0c58aa9269c..3e83472816f 100644 --- a/internal/armada/submit/conversion/post_process.go +++ b/internal/armada/submit/conversion/post_process.go @@ -155,7 +155,7 @@ func defaultTerminationGracePeriod(spec *v1.PodSpec, config configuration.Submis // Default's the job's GangNodeUniformityLabelAnnotation for gang jobs that do not define one. 
func defaultGangNodeUniformityLabel(msg *armadaevents.SubmitJob, config configuration.SubmissionConfig) { - annotations := msg.MainObject.GetObjectMeta().GetAnnotations() + annotations := msg.GetObjectMeta().GetAnnotations() if annotations == nil { return } diff --git a/internal/armada/submit/conversion/post_process_test.go b/internal/armada/submit/conversion/post_process_test.go index 111ed37da97..7bf385f4c05 100644 --- a/internal/armada/submit/conversion/post_process_test.go +++ b/internal/armada/submit/conversion/post_process_test.go @@ -587,10 +587,8 @@ func TestDefaultTerminationGracePeriod(t *testing.T) { func submitMsgFromAnnotations(annotations map[string]string) *armadaevents.SubmitJob { return &armadaevents.SubmitJob{ - MainObject: &armadaevents.KubernetesMainObject{ - ObjectMeta: &armadaevents.ObjectMeta{ - Annotations: annotations, - }, + ObjectMeta: &armadaevents.ObjectMeta{ + Annotations: annotations, }, } } diff --git a/internal/armada/submit/deduplication_test.go b/internal/armada/submit/deduplication_test.go index 5e275b4e0c9..8a1bd21b8fe 100644 --- a/internal/armada/submit/deduplication_test.go +++ b/internal/armada/submit/deduplication_test.go @@ -4,11 +4,12 @@ import ( "testing" "time" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/exp/maps" "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/pkg/api" ) @@ -17,26 +18,6 @@ type deduplicationIdsWithQueue struct { kvs map[string]string } -type InMemoryKeyValueStore struct { - kvs map[string][]byte -} - -func (m *InMemoryKeyValueStore) Store(_ *armadacontext.Context, kvs map[string][]byte) error { - maps.Copy(m.kvs, kvs) - return nil -} - -func (m *InMemoryKeyValueStore) Load(_ *armadacontext.Context, keys []string) (map[string][]byte, error) { - result := make(map[string][]byte, len(keys)) - for _, k := range keys { - v, ok := m.kvs[k] - if ok { - result[k] = v - } - } - return result, nil -} - func TestDeduplicator(t *testing.T) { tests := map[string]struct { initialKeys []deduplicationIdsWithQueue @@ -110,19 +91,24 @@ func TestDeduplicator(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) - deduplicator := NewDeduplicator(&InMemoryKeyValueStore{kvs: map[string][]byte{}}) + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + deduplicator := NewDeduplicator(db) + + // Store + for _, keys := range tc.initialKeys { + err := deduplicator.StoreOriginalJobIds(ctx, keys.queue, keys.kvs) + require.NoError(t, err) + } - // Store - for _, keys := range tc.initialKeys { - err := deduplicator.StoreOriginalJobIds(ctx, keys.queue, keys.kvs) + // Fetch + keys, err := deduplicator.GetOriginalJobIds(ctx, tc.queueToFetch, tc.jobsToFetch) require.NoError(t, err) - } - // Fetch - keys, err := deduplicator.GetOriginalJobIds(ctx, tc.queueToFetch, tc.jobsToFetch) - require.NoError(t, err) + assert.Equal(t, tc.expectedKeys, keys) - assert.Equal(t, tc.expectedKeys, keys) + return nil + }) + assert.NoError(t, err) cancel() }) } diff --git a/internal/armada/submit/deduplicaton.go b/internal/armada/submit/deduplicaton.go index d1cc6bcc805..37833611c5f 100644 --- a/internal/armada/submit/deduplicaton.go +++ b/internal/armada/submit/deduplicaton.go @@ -1,13 +1,12 @@ package submit import ( - "crypto/sha1" "fmt" + 
"github.com/jackc/pgx/v5/pgxpool" "golang.org/x/exp/maps" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/pgkeyvalue" "github.com/armadaproject/armada/pkg/api" ) @@ -19,21 +18,19 @@ type Deduplicator interface { // PostgresDeduplicator is an implementation of a Deduplicator that uses a pgkeyvalue.KeyValueStore as its state store type PostgresDeduplicator struct { - kvStore pgkeyvalue.KeyValueStore + db *pgxpool.Pool } -func NewDeduplicator(kvStore pgkeyvalue.KeyValueStore) *PostgresDeduplicator { - return &PostgresDeduplicator{kvStore: kvStore} +func NewDeduplicator(db *pgxpool.Pool) *PostgresDeduplicator { + return &PostgresDeduplicator{db: db} } func (s *PostgresDeduplicator) GetOriginalJobIds(ctx *armadacontext.Context, queue string, jobRequests []*api.JobSubmitRequestItem) (map[string]string, error) { // Armada checks for duplicate job submissions if a ClientId (i.e. a deduplication id) is provided. - // Deduplication is based on storing the combined hash of the ClientId and queue. For storage efficiency, - // we store hashes instead of user-provided strings. - kvs := make(map[string][]byte, len(jobRequests)) + kvs := make(map[string]string, len(jobRequests)) for _, req := range jobRequests { if req.ClientId != "" { - kvs[s.jobKey(queue, req.ClientId)] = []byte(req.ClientId) + kvs[s.jobKey(queue, req.ClientId)] = req.ClientId } } @@ -41,14 +38,14 @@ func (s *PostgresDeduplicator) GetOriginalJobIds(ctx *armadacontext.Context, que // If we have any client Ids, retrieve their job ids if len(kvs) > 0 { keys := maps.Keys(kvs) - existingKvs, err := s.kvStore.Load(ctx, keys) + existingKvs, err := s.loadMappings(ctx, keys) if err != nil { return nil, err } for k, v := range kvs { originalJobId, ok := existingKvs[k] if ok { - duplicates[string(v)] = string(originalJobId) + duplicates[v] = originalJobId } } } @@ -56,18 +53,70 @@ func (s *PostgresDeduplicator) GetOriginalJobIds(ctx *armadacontext.Context, que } func (s *PostgresDeduplicator) StoreOriginalJobIds(ctx *armadacontext.Context, queue string, mappings map[string]string) error { - if s.kvStore == nil || len(mappings) == 0 { + if len(mappings) == 0 { return nil } - kvs := make(map[string][]byte, len(mappings)) + kvs := make(map[string]string, len(mappings)) for k, v := range mappings { - kvs[s.jobKey(queue, k)] = []byte(v) + kvs[s.jobKey(queue, k)] = v } - return s.kvStore.Store(ctx, kvs) + return s.storeMappings(ctx, kvs) } func (s *PostgresDeduplicator) jobKey(queue, clientId string) string { - combined := fmt.Sprintf("%s:%s", queue, clientId) - h := sha1.Sum([]byte(combined)) - return fmt.Sprintf("%x", h) + return fmt.Sprintf("%s:%s", queue, clientId) +} + +func (s *PostgresDeduplicator) storeMappings(ctx *armadacontext.Context, mappings map[string]string) error { + deduplicationIDs := make([]string, 0, len(mappings)) + jobIDs := make([]string, 0, len(mappings)) + + for deduplicationID, jobID := range mappings { + deduplicationIDs = append(deduplicationIDs, deduplicationID) + jobIDs = append(jobIDs, jobID) + } + + sql := ` + INSERT INTO job_deduplication (deduplication_id, job_id) + SELECT unnest($1::text[]), unnest($2::text[]) + ON CONFLICT (deduplication_id) DO NOTHING + ` + _, err := s.db.Exec(ctx, sql, deduplicationIDs, jobIDs) + if err != nil { + return err + } + + return nil +} + +func (s *PostgresDeduplicator) loadMappings(ctx *armadacontext.Context, keys []string) (map[string]string, error) { + // Prepare the output map + result := make(map[string]string) + + 
sql := ` + SELECT deduplication_id, job_id + FROM job_deduplication + WHERE deduplication_id = ANY($1) + ` + + rows, err := s.db.Query(ctx, sql, keys) + if err != nil { + return nil, err + } + defer rows.Close() + + // Iterate through the result rows + for rows.Next() { + var deduplicationID, jobID string + if err := rows.Scan(&deduplicationID, &jobID); err != nil { + return nil, err + } + result[deduplicationID] = jobID + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return result, nil } diff --git a/internal/armada/submit/submit.go b/internal/armada/submit/submit.go index f8a93646ff0..f66a4dc7434 100644 --- a/internal/armada/submit/submit.go +++ b/internal/armada/submit/submit.go @@ -3,32 +3,24 @@ package submit import ( "context" "fmt" - "math" "github.com/gogo/protobuf/types" "github.com/gogo/status" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/armada/permissions" - "github.com/armadaproject/armada/internal/armada/repository" - "github.com/armadaproject/armada/internal/armada/server" + armadaqueue "github.com/armadaproject/armada/internal/armada/queue" "github.com/armadaproject/armada/internal/armada/submit/conversion" "github.com/armadaproject/armada/internal/armada/submit/validation" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/auth/permission" - "github.com/armadaproject/armada/internal/common/eventutil" - "github.com/armadaproject/armada/internal/common/pointer" "github.com/armadaproject/armada/internal/common/pulsarutils" - armadaslices "github.com/armadaproject/armada/internal/common/slices" + "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/internal/scheduler" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" "github.com/armadaproject/armada/pkg/client/queue" @@ -37,34 +29,31 @@ import ( // Server is a service that accepts API calls according to the original Armada submit API and publishes messages // to Pulsar based on those calls. 
type Server struct { + queueService api.QueueServiceServer publisher pulsarutils.Publisher - queueRepository repository.QueueRepository - jobRepository repository.JobRepository + queueCache armadaqueue.ReadOnlyQueueRepository submissionConfig configuration.SubmissionConfig deduplicator Deduplicator - submitChecker scheduler.SubmitScheduleChecker - authorizer server.ActionAuthorizer + authorizer auth.ActionAuthorizer // Below are used only for testing clock clock.Clock idGenerator func() *armadaevents.Uuid } func NewServer( + queueService api.QueueServiceServer, publisher pulsarutils.Publisher, - queueRepository repository.QueueRepository, - jobRepository repository.JobRepository, + queueCache armadaqueue.ReadOnlyQueueRepository, submissionConfig configuration.SubmissionConfig, deduplicator Deduplicator, - submitChecker scheduler.SubmitScheduleChecker, - authorizer server.ActionAuthorizer, + authorizer auth.ActionAuthorizer, ) *Server { return &Server{ + queueService: queueService, publisher: publisher, - queueRepository: queueRepository, - jobRepository: jobRepository, + queueCache: queueCache, submissionConfig: submissionConfig, deduplicator: deduplicator, - submitChecker: submitChecker, authorizer: authorizer, clock: clock.RealClock{}, idGenerator: func() *armadaevents.Uuid { @@ -149,24 +138,6 @@ func (s *Server) SubmitJobs(grpcCtx context.Context, req *api.JobSubmitRequest) Groups: groups, Events: submitMsgs, } - if canSchedule, reason := s.submitChecker.CheckApiJobs(es, s.submissionConfig.DefaultPriorityClassName); !canSchedule { - return nil, status.Errorf(codes.InvalidArgument, "at least one job or gang is unschedulable:\n%s", reason) - } - - pulsarJobDetails := armadaslices.Map( - jobResponses, - func(r *api.JobSubmitResponseItem) *schedulerobjects.PulsarSchedulerJobDetails { - return &schedulerobjects.PulsarSchedulerJobDetails{ - JobId: r.JobId, - Queue: req.Queue, - JobSet: req.JobSetId, - } - }) - - if err = s.jobRepository.StorePulsarSchedulerJobDetails(ctx, pulsarJobDetails); err != nil { - log.WithError(err).Error("failed store pulsar job details") - return nil, status.Error(codes.Internal, "failed store pulsar job details") - } err = s.publisher.PublishMessages(ctx, es) if err != nil { @@ -184,21 +155,14 @@ func (s *Server) SubmitJobs(grpcCtx context.Context, req *api.JobSubmitRequest) func (s *Server) CancelJobs(grpcCtx context.Context, req *api.JobCancelRequest) (*api.CancellationResult, error) { ctx := armadacontext.FromGrpcCtx(grpcCtx) - - if req.JobSetId == "" || req.Queue == "" { - ctx. - WithField("apidatamissing", "true"). - Warnf("Cancel jobs called with missing data: jobId=%s, jobset=%s, queue=%s, user=%s", req.JobId, req.JobSetId, req.Queue, s.GetUser(ctx)) + jobIds := []string{} + jobIds = append(jobIds, req.JobIds...) + if req.JobId != "" { + jobIds = append(jobIds, req.JobId) } + jobIds = slices.Unique(jobIds) - // separate code path for multiple jobs - if len(req.JobIds) > 0 { - return s.cancelJobsByIdsQueueJobset(ctx, req.JobIds, req.Queue, req.JobSetId, req.Reason) - } - - // Another separate code path for cancelling an entire job set - // TODO: We should deprecate this and move people over to CancelJobSet() - if req.JobId == "" { + if len(jobIds) == 0 { log.Warnf("CancelJobs called for queue=%s and jobset=%s but with empty job id. 
Redirecting to CancelJobSet()", req.Queue, req.JobSetId) _, err := s.CancelJobSet(ctx, &api.JobSetCancelRequest{ Queue: req.Queue, @@ -213,85 +177,34 @@ func (s *Server) CancelJobs(grpcCtx context.Context, req *api.JobCancelRequest) }, nil } - // resolve the queue and jobset of the job: we can't trust what the user has given us - resolvedQueue, resolvedJobset, err := s.resolveQueueAndJobsetForJob(ctx, req.JobId) + err := validation.ValidateQueueAndJobSet(req) if err != nil { return nil, err } - // If both a job id and queue or jobsetId is provided, return ErrNotFound if they don't match, - // since the job could not be found for the provided queue/jobSetId. - if req.Queue != "" && req.Queue != resolvedQueue { - return nil, &armadaerrors.ErrNotFound{ - Type: "job", - Value: req.JobId, - Message: fmt.Sprintf("job not found in queue %s, try waiting", req.Queue), - } - } - if req.JobSetId != "" && req.JobSetId != resolvedJobset { - return nil, &armadaerrors.ErrNotFound{ - Type: "job", - Value: req.JobId, - Message: fmt.Sprintf("job not found in job set %s, try waiting", req.JobSetId), - } - } - - userId, groups, err := s.authorize(ctx, resolvedQueue, permissions.CancelAnyJobs, queue.PermissionVerbCancel) - if err != nil { - return nil, err - } - - jobId, err := armadaevents.ProtoUuidFromUlidString(req.JobId) + userId, groups, err := s.authorize(ctx, req.Queue, permissions.CancelAnyJobs, queue.PermissionVerbCancel) if err != nil { return nil, err } - sequence := &armadaevents.EventSequence{ - Queue: resolvedQueue, - JobSetName: resolvedJobset, - UserId: userId, - Groups: groups, - Events: []*armadaevents.EventSequence_Event{ - { - Created: pointer.Now(), - Event: &armadaevents.EventSequence_Event_CancelJob{ - CancelJob: &armadaevents.CancelJob{ - JobId: jobId, - Reason: util.Truncate(req.Reason, 512), - }, - }, - }, - }, - } + var cancelledIds []string + es, cancelledIds := eventSequenceForJobIds(s.clock, jobIds, req.Queue, req.JobSetId, userId, groups, req.Reason) - // we can send the message to cancel to both schedulers. 
If the scheduler it doesn't belong to it'll be a no-op - err = s.publisher.PublishMessages(ctx, sequence) + err = s.publisher.PublishMessages(ctx, es) if err != nil { log.WithError(err).Error("failed send to Pulsar") return nil, status.Error(codes.Internal, "Failed to send message") } - return &api.CancellationResult{ - CancelledIds: []string{req.JobId}, // indicates no error + CancelledIds: cancelledIds, }, nil } func (s *Server) PreemptJobs(grpcCtx context.Context, req *api.JobPreemptRequest) (*types.Empty, error) { ctx := armadacontext.FromGrpcCtx(grpcCtx) - - if req.Queue == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "Queue", - Value: req.Queue, - Message: "queue cannot be empty when preempting jobs", - } - } - if req.JobSetId == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "JobSetId", - Value: req.JobSetId, - Message: "jobset cannot be empty when preempting jobs", - } + err := validation.ValidateQueueAndJobSet(req) + if err != nil { + return nil, err } userId, groups, err := s.authorize(ctx, req.Queue, permissions.PreemptAnyJobs, queue.PermissionVerbPreempt) @@ -299,12 +212,11 @@ func (s *Server) PreemptJobs(grpcCtx context.Context, req *api.JobPreemptRequest return nil, err } - sequence, err := preemptJobEventSequenceForJobIds(req.JobIds, req.Queue, req.JobSetId, userId, groups) + sequence, err := preemptJobEventSequenceForJobIds(s.clock, req.JobIds, req.Queue, req.JobSetId, userId, groups) if err != nil { return nil, err } - // send the message to both schedulers because jobs may be on either err = s.publisher.PublishMessages(ctx, sequence) if err != nil { log.WithError(err).Error("failed send to Pulsar") @@ -314,7 +226,7 @@ func (s *Server) PreemptJobs(grpcCtx context.Context, req *api.JobPreemptRequest return &types.Empty{}, nil } -func preemptJobEventSequenceForJobIds(jobIds []string, q, jobSet, userId string, groups []string) (*armadaevents.EventSequence, error) { +func preemptJobEventSequenceForJobIds(clock clock.Clock, jobIds []string, q, jobSet, userId string, groups []string) (*armadaevents.EventSequence, error) { sequence := &armadaevents.EventSequence{ Queue: q, JobSetName: jobSet, @@ -322,6 +234,7 @@ func preemptJobEventSequenceForJobIds(jobIds []string, q, jobSet, userId string, Groups: groups, Events: []*armadaevents.EventSequence_Event{}, } + eventTime := clock.Now().UTC() for _, jobIdStr := range jobIds { jobId, err := armadaevents.ProtoUuidFromUlidString(jobIdStr) if err != nil { @@ -329,10 +242,11 @@ func preemptJobEventSequenceForJobIds(jobIds []string, q, jobSet, userId string, return nil, fmt.Errorf("could not convert job id to uuid: %s", jobIdStr) } sequence.Events = append(sequence.Events, &armadaevents.EventSequence_Event{ - Created: pointer.Now(), + Created: &eventTime, Event: &armadaevents.EventSequence_Event_JobPreemptionRequested{ JobPreemptionRequested: &armadaevents.JobPreemptionRequested{ - JobId: jobId, + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), }, }, }) @@ -342,83 +256,33 @@ func preemptJobEventSequenceForJobIds(jobIds []string, q, jobSet, userId string, func (s *Server) ReprioritizeJobs(grpcCtx context.Context, req *api.JobReprioritizeRequest) (*api.JobReprioritizeResponse, error) { ctx := armadacontext.FromGrpcCtx(grpcCtx) - - if req.JobSetId == "" || req.Queue == "" { - ctx. - WithField("apidatamissing", "true"). 
- Warnf("Reprioritize jobs called with missing data: jobId=%s, jobset=%s, queue=%s, user=%s", req.JobIds[0], req.JobSetId, req.Queue, s.GetUser(ctx)) - } - - // If either queue or jobSetId is missing, we get the job set and queue associated - // with the first job id in the request. - // - // This must be done before checking auth, since the auth check expects a queue. - if len(req.JobIds) > 0 && (req.Queue == "" || req.JobSetId == "") { - firstJobId := req.JobIds[0] - - resolvedQueue, resolvedJobset, err := s.resolveQueueAndJobsetForJob(ctx, firstJobId) - if err != nil { - return nil, err - } - - // If both a job id and queue or jobsetId is provided, return ErrNotFound if they don't match, - // since the job could not be found for the provided queue/jobSetId. - // If both a job id and queue or jobsetId is provided, return ErrNotFound if they don't match, - // since the job could not be found for the provided queue/jobSetId. - if req.Queue != "" && req.Queue != resolvedQueue { - return nil, &armadaerrors.ErrNotFound{ - Type: "job", - Value: firstJobId, - Message: fmt.Sprintf("job not found in queue %s, try waiting", req.Queue), - } - } - if req.JobSetId != "" && req.JobSetId != resolvedJobset { - return nil, &armadaerrors.ErrNotFound{ - Type: "job", - Value: firstJobId, - Message: fmt.Sprintf("job not found in job set %s, try waiting", req.JobSetId), - } - } - req.Queue = resolvedQueue - req.JobSetId = resolvedJobset + err := validation.ValidateQueueAndJobSet(req) + if err != nil { + return nil, err } - // TODO: this is incorrect we only validate the permissions on the first job but the other jobs may belong to different queues userId, groups, err := s.authorize(ctx, req.Queue, permissions.ReprioritizeAnyJobs, queue.PermissionVerbReprioritize) if err != nil { return nil, err } - if req.Queue == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "Queue", - Value: req.Queue, - Message: "queue is empty", - } - } - if req.JobSetId == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "JobSetId", - Value: req.JobSetId, - Message: "JobSetId is empty", - } - } - priority := eventutil.LogSubmitPriorityFromApiPriority(req.NewPriority) - // results maps job ids to strings containing error messages. results := make(map[string]string) + priority := conversion.PriorityAsInt32(req.NewPriority) sequence := &armadaevents.EventSequence{ Queue: req.Queue, - JobSetName: req.JobSetId, // TODO: this is incorrect- the jobs may be for different jobsets + JobSetName: req.JobSetId, UserId: userId, Groups: groups, Events: make([]*armadaevents.EventSequence_Event, len(req.JobIds), len(req.JobIds)), } + eventTime := s.clock.Now().UTC() // No job ids implicitly indicates that all jobs in the job set should be re-prioritised. if len(req.JobIds) == 0 { sequence.Events = append(sequence.Events, &armadaevents.EventSequence_Event{ + Created: &eventTime, Event: &armadaevents.EventSequence_Event_ReprioritiseJobSet{ ReprioritiseJobSet: &armadaevents.ReprioritiseJobSet{ Priority: priority, @@ -431,7 +295,6 @@ func (s *Server) ReprioritizeJobs(grpcCtx context.Context, req *api.JobRepriorit // Otherwise, only the specified jobs should be re-prioritised. 
for i, jobIdString := range req.JobIds { - jobId, err := armadaevents.ProtoUuidFromUlidString(jobIdString) if err != nil { results[jobIdString] = err.Error() @@ -439,9 +302,11 @@ func (s *Server) ReprioritizeJobs(grpcCtx context.Context, req *api.JobRepriorit } sequence.Events[i] = &armadaevents.EventSequence_Event{ + Created: &eventTime, Event: &armadaevents.EventSequence_Event_ReprioritiseJob{ ReprioritiseJob: &armadaevents.ReprioritiseJob{ JobId: jobId, + JobIdStr: jobIdString, Priority: priority, }, }, @@ -451,7 +316,6 @@ func (s *Server) ReprioritizeJobs(grpcCtx context.Context, req *api.JobRepriorit } err = s.publisher.PublishMessages(ctx, sequence) - if err != nil { log.WithError(err).Error("failed send to Pulsar") return nil, status.Error(codes.Internal, "Failed to send message") @@ -464,22 +328,12 @@ func (s *Server) ReprioritizeJobs(grpcCtx context.Context, req *api.JobRepriorit func (s *Server) CancelJobSet(grpcCtx context.Context, req *api.JobSetCancelRequest) (*types.Empty, error) { ctx := armadacontext.FromGrpcCtx(grpcCtx) - if req.Queue == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "Queue", - Value: req.Queue, - Message: "queue cannot be empty when cancelling a jobset", - } - } - if req.JobSetId == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "JobSetId", - Value: req.JobSetId, - Message: "jobsetId cannot be empty when cancelling a jobset", - } + err := validation.ValidateQueueAndJobSet(req) + if err != nil { + return nil, err } - err := validateJobSetFilter(req.Filter) + err = validation.ValidateJobSetFilter(req.Filter) if err != nil { return nil, err } @@ -500,6 +354,7 @@ func (s *Server) CancelJobSet(grpcCtx context.Context, req *api.JobSetCancelRequ states[i] = armadaevents.JobState_RUNNING } } + eventTime := s.clock.Now().UTC() pulsarSchedulerSequence := &armadaevents.EventSequence{ Queue: req.Queue, JobSetName: req.JobSetId, @@ -507,7 +362,7 @@ func (s *Server) CancelJobSet(grpcCtx context.Context, req *api.JobSetCancelRequ Groups: groups, Events: []*armadaevents.EventSequence_Event{ { - Created: pointer.Now(), + Created: &eventTime, Event: &armadaevents.EventSequence_Event_CancelJobSet{ CancelJobSet: &armadaevents.CancelJobSet{ States: states, @@ -526,49 +381,18 @@ func (s *Server) CancelJobSet(grpcCtx context.Context, req *api.JobSetCancelRequ return &types.Empty{}, err } -// Assumes all Job IDs are in the queue and job set provided -func (s *Server) cancelJobsByIdsQueueJobset(grpcCtx context.Context, jobIds []string, q, jobSet string, reason string) (*api.CancellationResult, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - if q == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "Queue", - Value: "", - Message: "Queue cannot be empty when cancelling multiple jobs", - } - } - if jobSet == "" { - return nil, &armadaerrors.ErrInvalidArgument{ - Name: "Jobset", - Value: "", - Message: "Jobset cannot be empty when cancelling multiple jobs", - } - } - userId, groups, err := s.authorize(ctx, q, permissions.CancelAnyJobs, queue.PermissionVerbCancel) - if err != nil { - return nil, err - } - var cancelledIds []string - es, cancelledIds := eventSequenceForJobIds(jobIds, q, jobSet, userId, groups, reason) - err = s.publisher.PublishMessages(ctx, es) - if err != nil { - log.WithError(err).Error("failed send to Pulsar") - return nil, status.Error(codes.Internal, "Failed to send message") - } - return &api.CancellationResult{ - CancelledIds: cancelledIds, - }, nil -} - // Returns event sequence along with all valid job ids in the 
sequence -func eventSequenceForJobIds(jobIds []string, q, jobSet, userId string, groups []string, reason string) (*armadaevents.EventSequence, []string) { +func eventSequenceForJobIds(clock clock.Clock, jobIds []string, queue, jobSet, userId string, groups []string, reason string) (*armadaevents.EventSequence, []string) { sequence := &armadaevents.EventSequence{ - Queue: q, + Queue: queue, JobSetName: jobSet, UserId: userId, Groups: groups, Events: []*armadaevents.EventSequence_Event{}, } var validIds []string + truncatedReason := util.Truncate(reason, 512) + eventTime := clock.Now().UTC() for _, jobIdStr := range jobIds { jobId, err := armadaevents.ProtoUuidFromUlidString(jobIdStr) if err != nil { @@ -577,11 +401,12 @@ func eventSequenceForJobIds(jobIds []string, q, jobSet, userId string, groups [] } validIds = append(validIds, jobIdStr) sequence.Events = append(sequence.Events, &armadaevents.EventSequence_Event{ - Created: pointer.Now(), + Created: &eventTime, Event: &armadaevents.EventSequence_Event_CancelJob{ CancelJob: &armadaevents.CancelJob{ - JobId: jobId, - Reason: util.Truncate(reason, 512), + JobId: jobId, + JobIdStr: jobIdStr, + Reason: truncatedReason, }, }, }) @@ -589,207 +414,6 @@ func eventSequenceForJobIds(jobIds []string, q, jobSet, userId string, groups [] return sequence, validIds } -// resolveQueueAndJobsetForJob returns the queue and jobset for a job. -// If no job can be retrieved then an error is returned. -func (s *Server) resolveQueueAndJobsetForJob(ctx *armadacontext.Context, jobId string) (string, string, error) { - jobDetails, err := s.jobRepository.GetPulsarSchedulerJobDetails(ctx, jobId) - if err != nil { - return "", "", err - } - if jobDetails != nil { - return jobDetails.Queue, jobDetails.JobSet, nil - } - return "", "", &armadaerrors.ErrNotFound{ - Type: "job", - Value: jobId, - } -} - -func validateJobSetFilter(filter *api.JobSetFilter) error { - if filter == nil { - return nil - } - providedStatesSet := map[string]bool{} - for _, state := range filter.States { - providedStatesSet[state.String()] = true - } - for _, state := range filter.States { - if state == api.JobState_PENDING { - if _, present := providedStatesSet[api.JobState_RUNNING.String()]; !present { - return fmt.Errorf("unsupported state combination - state %s and %s must always be used together", - api.JobState_PENDING, api.JobState_RUNNING) - } - } - - if state == api.JobState_RUNNING { - if _, present := providedStatesSet[api.JobState_PENDING.String()]; !present { - return fmt.Errorf("unsupported state combination - state %s and %s must always be used together", - api.JobState_PENDING, api.JobState_RUNNING) - } - } - } - - return nil -} - -func (s *Server) CreateQueue(grpcCtx context.Context, req *api.Queue) (*types.Empty, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - err := s.authorizer.AuthorizeAction(ctx, permissions.CreateQueue) - var ep *armadaerrors.ErrUnauthorized - if errors.As(err, &ep) { - return nil, status.Errorf(codes.PermissionDenied, "[CreateQueue] error creating queue %s: %s", req.Name, ep) - } else if err != nil { - return nil, status.Errorf(codes.Unavailable, "[CreateQueue] error checking permissions: %s", err) - } - - if len(req.UserOwners) == 0 { - principal := authorization.GetPrincipal(ctx) - req.UserOwners = []string{principal.GetName()} - } - - queue, err := queue.NewQueue(req) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "[CreateQueue] error validating queue: %s", err) - } - - err = s.queueRepository.CreateQueue(ctx, queue) - var eq 
*repository.ErrQueueAlreadyExists - if errors.As(err, &eq) { - return nil, status.Errorf(codes.AlreadyExists, "[CreateQueue] error creating queue: %s", err) - } else if err != nil { - return nil, status.Errorf(codes.Unavailable, "[CreateQueue] error creating queue: %s", err) - } - - return &types.Empty{}, nil -} - -func (s *Server) CreateQueues(grpcCtx context.Context, req *api.QueueList) (*api.BatchQueueCreateResponse, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - var failedQueues []*api.QueueCreateResponse - // Create a queue for each element of the request body and return the failures. - for _, queue := range req.Queues { - _, err := s.CreateQueue(ctx, queue) - if err != nil { - failedQueues = append(failedQueues, &api.QueueCreateResponse{ - Queue: queue, - Error: err.Error(), - }) - } - } - - return &api.BatchQueueCreateResponse{ - FailedQueues: failedQueues, - }, nil -} - -func (s *Server) UpdateQueue(grpcCtx context.Context, req *api.Queue) (*types.Empty, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - err := s.authorizer.AuthorizeAction(ctx, permissions.CreateQueue) - var ep *armadaerrors.ErrUnauthorized - if errors.As(err, &ep) { - return nil, status.Errorf(codes.PermissionDenied, "[UpdateQueue] error updating queue %s: %s", req.Name, ep) - } else if err != nil { - return nil, status.Errorf(codes.Unavailable, "[UpdateQueue] error checking permissions: %s", err) - } - - queue, err := queue.NewQueue(req) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "[UpdateQueue] error: %s", err) - } - - err = s.queueRepository.UpdateQueue(ctx, queue) - var e *repository.ErrQueueNotFound - if errors.As(err, &e) { - return nil, status.Errorf(codes.NotFound, "[UpdateQueue] error: %s", err) - } else if err != nil { - return nil, status.Errorf(codes.Unavailable, "[UpdateQueue] error getting queue %q: %s", queue.Name, err) - } - - return &types.Empty{}, nil -} - -func (s *Server) UpdateQueues(grpcCtx context.Context, req *api.QueueList) (*api.BatchQueueUpdateResponse, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - var failedQueues []*api.QueueUpdateResponse - - // Create a queue for each element of the request body and return the failures. 
- for _, queue := range req.Queues { - _, err := s.UpdateQueue(ctx, queue) - if err != nil { - failedQueues = append(failedQueues, &api.QueueUpdateResponse{ - Queue: queue, - Error: err.Error(), - }) - } - } - - return &api.BatchQueueUpdateResponse{ - FailedQueues: failedQueues, - }, nil -} - -func (s *Server) DeleteQueue(grpcCtx context.Context, req *api.QueueDeleteRequest) (*types.Empty, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - err := s.authorizer.AuthorizeAction(ctx, permissions.DeleteQueue) - var ep *armadaerrors.ErrUnauthorized - if errors.As(err, &ep) { - return nil, status.Errorf(codes.PermissionDenied, "[DeleteQueue] error deleting queue %s: %s", req.Name, ep) - } else if err != nil { - return nil, status.Errorf(codes.Unavailable, "[DeleteQueue] error checking permissions: %s", err) - } - err = s.queueRepository.DeleteQueue(ctx, req.Name) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "[DeleteQueue] error deleting queue %s: %s", req.Name, err) - } - return &types.Empty{}, nil -} - -func (s *Server) GetQueue(grpcCtx context.Context, req *api.QueueGetRequest) (*api.Queue, error) { - ctx := armadacontext.FromGrpcCtx(grpcCtx) - queue, err := s.queueRepository.GetQueue(ctx, req.Name) - var e *repository.ErrQueueNotFound - if errors.As(err, &e) { - return nil, status.Errorf(codes.NotFound, "[GetQueue] error: %s", err) - } else if err != nil { - return nil, status.Errorf(codes.Unavailable, "[GetQueue] error getting queue %q: %s", req.Name, err) - } - return queue.ToAPI(), nil -} - -func (s *Server) GetQueues(req *api.StreamingQueueGetRequest, stream api.Submit_GetQueuesServer) error { - ctx := armadacontext.FromGrpcCtx(stream.Context()) - - // Receive once to get information about the number of queues to return - numToReturn := req.GetNum() - if numToReturn < 1 { - numToReturn = math.MaxUint32 - } - - queues, err := s.queueRepository.GetAllQueues(ctx) - if err != nil { - return err - } - for i, queue := range queues { - if uint32(i) < numToReturn { - err := stream.Send(&api.StreamingQueueMessage{ - Event: &api.StreamingQueueMessage_Queue{Queue: queue.ToAPI()}, - }) - if err != nil { - return err - } - } - } - err = stream.Send(&api.StreamingQueueMessage{ - Event: &api.StreamingQueueMessage_End{ - End: &api.EndMarker{}, - }, - }) - if err != nil { - return err - } - return nil -} - // authorize authorizes a user request to submit a state transition message to the log. // User information used for authorization is extracted from the provided context. // Checks that the user has either anyPerm (e.g., permissions.SubmitAnyJobs) or perm (e.g., PermissionVerbSubmit) for this queue. @@ -800,10 +424,10 @@ func (s *Server) authorize( anyPerm permission.Permission, perm queue.PermissionVerb, ) (string, []string, error) { - principal := authorization.GetPrincipal(ctx) + principal := auth.GetPrincipal(ctx) userId := principal.GetName() groups := principal.GetGroupNames() - q, err := s.queueRepository.GetQueue(ctx, queueName) + q, err := s.queueCache.GetQueue(ctx, queueName) if err != nil { return userId, groups, err } @@ -812,7 +436,7 @@ func (s *Server) authorize( } func (s *Server) GetUser(ctx *armadacontext.Context) string { - principal := authorization.GetPrincipal(ctx) + principal := auth.GetPrincipal(ctx) return principal.GetName() } @@ -820,3 +444,33 @@ func (s *Server) Health(_ context.Context, _ *types.Empty) (*api.HealthCheckResp // For now, lets make the health check really simple. 
return &api.HealthCheckResponse{Status: api.HealthCheckResponse_SERVING}, nil } + +// Functions below are deprecated + +func (s *Server) CreateQueue(ctx context.Context, q *api.Queue) (*types.Empty, error) { + return s.queueService.CreateQueue(ctx, q) +} + +func (s *Server) CreateQueues(ctx context.Context, list *api.QueueList) (*api.BatchQueueCreateResponse, error) { + return s.queueService.CreateQueues(ctx, list) +} + +func (s *Server) UpdateQueue(ctx context.Context, q *api.Queue) (*types.Empty, error) { + return s.queueService.UpdateQueue(ctx, q) +} + +func (s *Server) UpdateQueues(ctx context.Context, list *api.QueueList) (*api.BatchQueueUpdateResponse, error) { + return s.queueService.UpdateQueues(ctx, list) +} + +func (s *Server) DeleteQueue(ctx context.Context, request *api.QueueDeleteRequest) (*types.Empty, error) { + return s.queueService.DeleteQueue(ctx, request) +} + +func (s *Server) GetQueue(ctx context.Context, request *api.QueueGetRequest) (*api.Queue, error) { + return s.queueService.GetQueue(ctx, request) +} + +func (s *Server) GetQueues(request *api.StreamingQueueGetRequest, server api.Submit_GetQueuesServer) error { + return s.queueService.GetQueues(request, server) +} diff --git a/internal/armada/submit/submit_test.go b/internal/armada/submit/submit_test.go index db557cc42fb..5572bc67a1d 100644 --- a/internal/armada/submit/submit_test.go +++ b/internal/armada/submit/submit_test.go @@ -8,36 +8,34 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "k8s.io/utils/pointer" "github.com/armadaproject/armada/internal/armada/mocks" "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/armada/submit/testfixtures" "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/auth/permission" + "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" "github.com/armadaproject/armada/pkg/client/queue" ) type mockObjects struct { - publisher *mocks.MockPublisher - queueRepo *mocks.MockQueueRepository - jobRep *mocks.MockJobRepository - deduplicator *mocks.MockDeduplicator - submitChecker *mocks.MockSubmitScheduleChecker - authorizer *mocks.MockActionAuthorizer + publisher *mocks.MockPublisher + queueRepo *mocks.MockQueueRepository + deduplicator *mocks.MockDeduplicator + authorizer *mocks.MockActionAuthorizer } func createMocks(t *testing.T) *mockObjects { ctrl := gomock.NewController(t) return &mockObjects{ - publisher: mocks.NewMockPublisher(ctrl), - queueRepo: mocks.NewMockQueueRepository(ctrl), - jobRep: mocks.NewMockJobRepository(ctrl), - deduplicator: mocks.NewMockDeduplicator(ctrl), - submitChecker: mocks.NewMockSubmitScheduleChecker(ctrl), - authorizer: mocks.NewMockActionAuthorizer(ctrl), + publisher: mocks.NewMockPublisher(ctrl), + queueRepo: mocks.NewMockQueueRepository(ctrl), + deduplicator: mocks.NewMockDeduplicator(ctrl), + authorizer: mocks.NewMockActionAuthorizer(ctrl), } } @@ -103,18 +101,6 @@ func TestSubmit_Success(t *testing.T) { StoreOriginalJobIds(ctx, testfixtures.DefaultQueue.Name, gomock.Any()). Times(1) - mockedObjects.submitChecker. - EXPECT(). - CheckApiJobs(gomock.Any(), testfixtures.DefaultPriorityClass). - Return(true, ""). - Times(1) - - mockedObjects.jobRep. - EXPECT(). - StorePulsarSchedulerJobDetails(ctx, gomock.Any()). 
- Return(nil). - Times(1) - expectedEventSequence := &armadaevents.EventSequence{ Queue: testfixtures.DefaultQueue.Name, JobSetName: testfixtures.DefaultJobset, @@ -203,43 +189,260 @@ func TestSubmit_FailedValidation(t *testing.T) { } } -func TestSubmit_SubmitCheckFailed(t *testing.T) { +func TestCancelJobs(t *testing.T) { + jobId1 := util.ULID().String() + jobId2 := util.ULID().String() tests := map[string]struct { - req *api.JobSubmitRequest + req *api.JobCancelRequest + expectedEvents []*armadaevents.EventSequence_Event }{ - "Submit check fails": { - req: testfixtures.SubmitRequestWithNItems(1), + "Cancel job using JobId": { + req: &api.JobCancelRequest{JobId: jobId1, Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset}, + expectedEvents: testfixtures.CreateCancelJobSequenceEvents([]string{jobId1}), + }, + "Cancel jobs using JobIds": { + req: &api.JobCancelRequest{JobIds: []string{jobId1, jobId2}, Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset}, + expectedEvents: testfixtures.CreateCancelJobSequenceEvents([]string{jobId1, jobId2}), + }, + "Cancel jobs using both JobId and JobIds": { + req: &api.JobCancelRequest{JobId: jobId1, JobIds: []string{jobId2}, Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset}, + expectedEvents: testfixtures.CreateCancelJobSequenceEvents([]string{jobId2, jobId1}), + }, + "Cancel jobs using both JobId and JobIds - overlapping ids": { + req: &api.JobCancelRequest{JobId: jobId1, JobIds: []string{jobId1}, Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset}, + expectedEvents: testfixtures.CreateCancelJobSequenceEvents([]string{jobId1}), + }, + "Cancel jobSet": { + req: &api.JobCancelRequest{Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset}, + expectedEvents: []*armadaevents.EventSequence_Event{testfixtures.CreateCancelJobSetSequenceEvent()}, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + ctx = armadacontext.WithValue(ctx, "principal", testfixtures.DefaultPrincipal) + server, mockedObjects := createTestServer(t) mockedObjects.queueRepo. - EXPECT().GetQueue(ctx, tc.req.Queue). + EXPECT(). + GetQueue(ctx, tc.req.Queue). Return(testfixtures.DefaultQueue, nil). Times(1) mockedObjects.authorizer. EXPECT(). - AuthorizeQueueAction(ctx, testfixtures.DefaultQueue, permissions.SubmitAnyJobs, queue.PermissionVerbSubmit). + AuthorizeQueueAction(ctx, testfixtures.DefaultQueue, permission.Permission(permissions.CancelAnyJobs), queue.PermissionVerbCancel). Return(nil). Times(1) - mockedObjects.deduplicator. + expectedEventSequence := &armadaevents.EventSequence{ + Queue: testfixtures.DefaultQueue.Name, + JobSetName: testfixtures.DefaultJobset, + UserId: testfixtures.DefaultOwner, + Groups: []string{"everyone", "groupA"}, + Events: tc.expectedEvents, + } + + var capturedEventSequence *armadaevents.EventSequence + mockedObjects.publisher.EXPECT(). + PublishMessages(ctx, gomock.Any()). + Times(1). 
+ Do(func(_ interface{}, es *armadaevents.EventSequence) { + capturedEventSequence = es + }) + + _, err := server.CancelJobs(ctx, tc.req) + assert.NoError(t, err) + assert.Equal(t, expectedEventSequence, capturedEventSequence) + cancel() + }) + } +} + +func TestCancelJobs_FailedValidation(t *testing.T) { + jobId1 := util.ULID().String() + tests := map[string]struct { + req *api.JobCancelRequest + }{ + "Queue is empty": { + req: &api.JobCancelRequest{JobId: jobId1, JobSetId: testfixtures.DefaultJobset}, + }, + "Job set is empty": { + req: &api.JobCancelRequest{JobId: jobId1, Queue: testfixtures.DefaultQueue.Name}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + server, _ := createTestServer(t) + + resp, err := server.CancelJobs(ctx, tc.req) + assert.Error(t, err) + assert.Nil(t, resp) + cancel() + }) + } +} + +func TestPreemptJobs(t *testing.T) { + jobId1 := util.NewULID() + jobId2 := util.NewULID() + tests := map[string]struct { + req *api.JobPreemptRequest + expectedEvents []*armadaevents.EventSequence_Event + }{ + "Preempt jobs using JobIds": { + req: &api.JobPreemptRequest{JobIds: []string{jobId1, jobId2}, Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset}, + expectedEvents: testfixtures.CreatePreemptJobSequenceEvents([]string{jobId1, jobId2}), + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + ctx = armadacontext.WithValue(ctx, "principal", testfixtures.DefaultPrincipal) + + server, mockedObjects := createTestServer(t) + + mockedObjects.queueRepo. EXPECT(). - GetOriginalJobIds(ctx, testfixtures.DefaultQueue.Name, tc.req.JobRequestItems). - Return(nil, nil). + GetQueue(ctx, tc.req.Queue). + Return(testfixtures.DefaultQueue, nil). Times(1) - mockedObjects.submitChecker. + mockedObjects.authorizer. EXPECT(). - CheckApiJobs(gomock.Any(), testfixtures.DefaultPriorityClass). - Return(false, ""). + AuthorizeQueueAction(ctx, testfixtures.DefaultQueue, permission.Permission(permissions.PreemptAnyJobs), queue.PermissionVerbPreempt). + Return(nil). Times(1) - resp, err := server.SubmitJobs(ctx, tc.req) + expectedEventSequence := &armadaevents.EventSequence{ + Queue: testfixtures.DefaultQueue.Name, + JobSetName: testfixtures.DefaultJobset, + UserId: testfixtures.DefaultOwner, + Groups: []string{"everyone", "groupA"}, + Events: tc.expectedEvents, + } + + var capturedEventSequence *armadaevents.EventSequence + mockedObjects.publisher.EXPECT(). + PublishMessages(ctx, gomock.Any()). + Times(1). 
+ Do(func(_ interface{}, es *armadaevents.EventSequence) { + capturedEventSequence = es + }) + + _, err := server.PreemptJobs(ctx, tc.req) + assert.NoError(t, err) + assert.Equal(t, expectedEventSequence, capturedEventSequence) + cancel() + }) + } +} + +func TestPreemptJobs_FailedValidation(t *testing.T) { + jobId1 := util.ULID().String() + tests := map[string]struct { + req *api.JobPreemptRequest + }{ + "Queue is empty": { + req: &api.JobPreemptRequest{JobIds: []string{jobId1}, JobSetId: testfixtures.DefaultJobset}, + }, + "Job set is empty": { + req: &api.JobPreemptRequest{JobIds: []string{jobId1}, Queue: testfixtures.DefaultQueue.Name}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + server, _ := createTestServer(t) + + resp, err := server.PreemptJobs(ctx, tc.req) + assert.Error(t, err) + assert.Nil(t, resp) + cancel() + }) + } +} + +func TestReprioritizeJobs(t *testing.T) { + jobId1 := util.ULID().String() + jobId2 := util.ULID().String() + newPriority := float64(5) + tests := map[string]struct { + req *api.JobReprioritizeRequest + expectedEvents []*armadaevents.EventSequence_Event + }{ + "Reprioritize jobs using JobIds": { + req: &api.JobReprioritizeRequest{JobIds: []string{jobId1, jobId2}, Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset, NewPriority: newPriority}, + expectedEvents: testfixtures.CreateReprioritizeJobSequenceEvents([]string{jobId1, jobId2}, newPriority), + }, + "Reprioritize jobSet": { + req: &api.JobReprioritizeRequest{Queue: testfixtures.DefaultQueue.Name, JobSetId: testfixtures.DefaultJobset, NewPriority: newPriority}, + expectedEvents: []*armadaevents.EventSequence_Event{testfixtures.CreateReprioritizedJobSetSequenceEvent(newPriority)}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + ctx = armadacontext.WithValue(ctx, "principal", testfixtures.DefaultPrincipal) + + server, mockedObjects := createTestServer(t) + + mockedObjects.queueRepo. + EXPECT(). + GetQueue(ctx, tc.req.Queue). + Return(testfixtures.DefaultQueue, nil). + Times(1) + + mockedObjects.authorizer. + EXPECT(). + AuthorizeQueueAction(ctx, testfixtures.DefaultQueue, permission.Permission(permissions.ReprioritizeAnyJobs), queue.PermissionVerbReprioritize). + Return(nil). + Times(1) + + expectedEventSequence := &armadaevents.EventSequence{ + Queue: testfixtures.DefaultQueue.Name, + JobSetName: testfixtures.DefaultJobset, + UserId: testfixtures.DefaultOwner, + Groups: []string{"everyone", "groupA"}, + Events: tc.expectedEvents, + } + + var capturedEventSequence *armadaevents.EventSequence + mockedObjects.publisher.EXPECT(). + PublishMessages(ctx, gomock.Any()). + Times(1). 
+ Do(func(_ interface{}, es *armadaevents.EventSequence) { + capturedEventSequence = es + }) + + _, err := server.ReprioritizeJobs(ctx, tc.req) + assert.NoError(t, err) + assert.Equal(t, expectedEventSequence, capturedEventSequence) + cancel() + }) + } +} + +func TestReprioritizeJobs_FailedValidation(t *testing.T) { + jobId1 := util.ULID().String() + tests := map[string]struct { + req *api.JobReprioritizeRequest + }{ + "Queue is empty": { + req: &api.JobReprioritizeRequest{JobIds: []string{jobId1}, JobSetId: testfixtures.DefaultJobset}, + }, + "Job set is empty": { + req: &api.JobReprioritizeRequest{JobIds: []string{jobId1}, Queue: testfixtures.DefaultQueue.Name}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + server, _ := createTestServer(t) + + resp, err := server.ReprioritizeJobs(ctx, tc.req) assert.Error(t, err) assert.Nil(t, resp) cancel() @@ -309,12 +512,11 @@ func withTerminationGracePeriod(req *api.JobSubmitRequest, v *int64) *api.JobSub func createTestServer(t *testing.T) (*Server, *mockObjects) { m := createMocks(t) server := NewServer( + nil, m.publisher, m.queueRepo, - m.jobRep, testfixtures.DefaultSubmissionConfig(), m.deduplicator, - m.submitChecker, m.authorizer) server.clock = clock.NewFakeClock(testfixtures.DefaultTime) server.idGenerator = testfixtures.TestUlidGenerator() diff --git a/internal/armada/submit/testfixtures/test_fixtures.go b/internal/armada/submit/testfixtures/test_fixtures.go index ac0d4ce86ee..c275632a139 100644 --- a/internal/armada/submit/testfixtures/test_fixtures.go +++ b/internal/armada/submit/testfixtures/test_fixtures.go @@ -9,7 +9,7 @@ import ( "k8s.io/utils/pointer" "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" armadaresource "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" @@ -21,7 +21,7 @@ var ( DefaultOwner = "testUser" DefaultJobset = "testJobset" DefaultQueue = queue.Queue{Name: "testQueue"} - DefaultPrincipal = authorization.NewStaticPrincipal(DefaultOwner, []string{"groupA"}) + DefaultPrincipal = auth.NewStaticPrincipal(DefaultOwner, []string{"groupA"}) DefaultContainerPort = v1.ContainerPort{ Name: "testContainerPort", ContainerPort: 8080, @@ -56,8 +56,6 @@ var ( DefaultTerminationGracePeriodSeconds = int64(30) DefaultActiveDeadlineSeconds = int64(3600) DefaultTime = time.Now().UTC() - DefaultHostNameSuffix = "testHostNameSuffix" - DefaultCertNameSuffix = "testHostNameSuffix" ) func DefaultSubmissionConfig() configuration.SubmissionConfig { @@ -74,6 +72,77 @@ func DefaultSubmissionConfig() configuration.SubmissionConfig { } } +func CreatePreemptJobSequenceEvents(jobIds []string) []*armadaevents.EventSequence_Event { + events := make([]*armadaevents.EventSequence_Event, len(jobIds)) + for i, jobId := range jobIds { + events[i] = &armadaevents.EventSequence_Event{ + Created: &DefaultTime, + Event: &armadaevents.EventSequence_Event_JobPreemptionRequested{ + JobPreemptionRequested: &armadaevents.JobPreemptionRequested{ + JobId: armadaevents.MustProtoUuidFromUlidString(jobId), + JobIdStr: jobId, + }, + }, + } + } + return events +} + +func CreateCancelJobSequenceEvents(jobIds []string) []*armadaevents.EventSequence_Event { + events := make([]*armadaevents.EventSequence_Event, 
len(jobIds)) + for i, jobId := range jobIds { + events[i] = &armadaevents.EventSequence_Event{ + Created: &DefaultTime, + Event: &armadaevents.EventSequence_Event_CancelJob{ + CancelJob: &armadaevents.CancelJob{ + JobId: armadaevents.MustProtoUuidFromUlidString(jobId), + JobIdStr: jobId, + }, + }, + } + } + return events +} + +func CreateCancelJobSetSequenceEvent() *armadaevents.EventSequence_Event { + return &armadaevents.EventSequence_Event{ + Created: &DefaultTime, + Event: &armadaevents.EventSequence_Event_CancelJobSet{ + CancelJobSet: &armadaevents.CancelJobSet{ + States: []armadaevents.JobState{}, + }, + }, + } +} + +func CreateReprioritizeJobSequenceEvents(jobIds []string, newPriority float64) []*armadaevents.EventSequence_Event { + events := make([]*armadaevents.EventSequence_Event, len(jobIds)) + for i, jobId := range jobIds { + events[i] = &armadaevents.EventSequence_Event{ + Created: &DefaultTime, + Event: &armadaevents.EventSequence_Event_ReprioritiseJob{ + ReprioritiseJob: &armadaevents.ReprioritiseJob{ + JobId: armadaevents.MustProtoUuidFromUlidString(jobId), + JobIdStr: jobId, + Priority: uint32(newPriority), + }, + }, + } + } + return events +} + +func CreateReprioritizedJobSetSequenceEvent(newPriority float64) *armadaevents.EventSequence_Event { + return &armadaevents.EventSequence_Event{ + Created: &DefaultTime, + Event: &armadaevents.EventSequence_Event_ReprioritiseJobSet{ + ReprioritiseJobSet: &armadaevents.ReprioritiseJobSet{ + Priority: uint32(newPriority), + }, + }, + } +} + func NEventSequenceEvents(n int) []*armadaevents.EventSequence_Event { events := make([]*armadaevents.EventSequence_Event, n) for i := 0; i < n; i++ { @@ -114,8 +183,10 @@ func JobSubmitRequestItem(i int) *api.JobSubmitRequestItem { } func SubmitJob(i int) *armadaevents.SubmitJob { + jobId := TestUlid(i) return &armadaevents.SubmitJob{ - JobId: TestUlid(i), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), Priority: DefaultPriorityInt, ObjectMeta: &armadaevents.ObjectMeta{Namespace: DefaultNamespace}, Objects: []*armadaevents.KubernetesObject{}, diff --git a/internal/armada/submit/job_set_filter.go b/internal/armada/submit/validation/job_set.go similarity index 61% rename from internal/armada/submit/job_set_filter.go rename to internal/armada/submit/validation/job_set.go index 23192a88683..9c78e976a83 100644 --- a/internal/armada/submit/job_set_filter.go +++ b/internal/armada/submit/validation/job_set.go @@ -1,8 +1,9 @@ -package submit +package validation import ( "fmt" + "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/pkg/api" ) @@ -32,3 +33,26 @@ func ValidateJobSetFilter(filter *api.JobSetFilter) error { return nil } + +type JobSetRequest interface { + GetJobSetId() string + GetQueue() string +} + +func ValidateQueueAndJobSet(req JobSetRequest) error { + if req.GetQueue() == "" { + return &armadaerrors.ErrInvalidArgument{ + Name: "Queue", + Value: req.GetQueue(), + Message: "queue cannot be empty", + } + } + if req.GetJobSetId() == "" { + return &armadaerrors.ErrInvalidArgument{ + Name: "JobSetId", + Value: req.GetJobSetId(), + Message: "jobset cannot be empty", + } + } + return nil +} diff --git a/internal/armada/submit/job_set_filter_test.go b/internal/armada/submit/validation/job_set_test.go similarity index 98% rename from internal/armada/submit/job_set_filter_test.go rename to internal/armada/submit/validation/job_set_test.go index 428c8293861..2ffcbec8d94 100644 --- a/internal/armada/submit/job_set_filter_test.go 
+++ b/internal/armada/submit/validation/job_set_test.go @@ -1,4 +1,4 @@ -package submit +package validation import ( "testing" diff --git a/internal/armada/submit/validation/validation.go b/internal/armada/submit/validation/submit_request.go similarity index 93% rename from internal/armada/submit/validation/validation.go rename to internal/armada/submit/validation/submit_request.go index af078cce1a2..04591aa0981 100644 --- a/internal/armada/submit/validation/validation.go +++ b/internal/armada/submit/validation/submit_request.go @@ -34,12 +34,13 @@ var ( validateTerminationGracePeriod, validateIngresses, validatePorts, + validateClientId, } ) // ValidateSubmitRequest ensures that the incoming api.JobSubmitRequest is well-formed. It achieves this // by applying a series of validators that each check a single aspect of the request. Validators may -// chose to validate the whole obSubmitRequest or just a single JobSubmitRequestItem. +// choose to validate the whole obSubmitRequest or just a single JobSubmitRequestItem. // This function will return the error from the first validator that fails, or nil if all validators pass. func ValidateSubmitRequest(req *api.JobSubmitRequest, config configuration.SubmissionConfig) error { for _, validationFunc := range requestValidators { @@ -179,6 +180,15 @@ func validateAffinity(j *api.JobSubmitRequestItem, _ configuration.SubmissionCon return nil } +// Ensures that if a request specifies a ClientId, that clientID is not too long +func validateClientId(j *api.JobSubmitRequestItem, _ configuration.SubmissionConfig) error { + const maxClientIdChars = 100 + if len(j.GetClientId()) > maxClientIdChars { + return fmt.Errorf("client id of length %d is greater than max allowed length of %d", len(j.ClientId), maxClientIdChars) + } + return nil +} + // Ensures that if a request specifies a PriorityClass, that priority class is supported by Armada. func validatePriorityClasses(j *api.JobSubmitRequestItem, config configuration.SubmissionConfig) error { spec := j.GetMainPodSpec() @@ -246,8 +256,8 @@ type jobAdapter struct { *api.JobSubmitRequestItem } -// GetPriorityClassName is needed to fulfil the MinimalJob interface -func (j jobAdapter) GetPriorityClassName() string { +// PriorityClassName is needed to fulfil the MinimalJob interface +func (j jobAdapter) PriorityClassName() string { podSpec := j.GetMainPodSpec() if podSpec != nil { return j.GetMainPodSpec().PriorityClassName @@ -255,6 +265,11 @@ func (j jobAdapter) GetPriorityClassName() string { return "" } +// Annotations is needed to fulfil the MinimalJob interface +func (j jobAdapter) Annotations() map[string]string { + return j.GetAnnotations() +} + // Ensures that any gang jobs defined in the request are consistent. 
This checks that all jobs in the same gang have // the same: // - Cardinality @@ -278,12 +293,6 @@ func validateGangs(request *api.JobSubmitRequest, _ configuration.SubmissionConf actual.Id, expected.Cardinality, actual.Cardinality, ) } - if expected.MinimumCardinality != actual.MinimumCardinality { - return errors.Errorf( - "inconsistent gang minimum cardinality in gang %s: expected %d but got %d", - actual.Id, expected.MinimumCardinality, actual.MinimumCardinality, - ) - } if expected.PriorityClassName != actual.PriorityClassName { return errors.Errorf( "inconsistent PriorityClassName in gang %s: expected %s but got %s", diff --git a/internal/armada/submit/validation/validation_test.go b/internal/armada/submit/validation/submit_request_test.go similarity index 96% rename from internal/armada/submit/validation/validation_test.go rename to internal/armada/submit/validation/submit_request_test.go index 4a6dff1698e..396cdbfe41a 100644 --- a/internal/armada/submit/validation/validation_test.go +++ b/internal/armada/submit/validation/submit_request_test.go @@ -2,6 +2,7 @@ package validation import ( "strconv" + "strings" "testing" "time" @@ -140,18 +141,6 @@ func TestValidateGangs(t *testing.T) { }, expectSuccess: true, }, - "complete gang job of cardinality 2 with minimum cardinality of 1": { - jobRequests: []*api.JobSubmitRequestItem{ - { - Annotations: map[string]string{ - configuration.GangIdAnnotation: "foo", - configuration.GangCardinalityAnnotation: strconv.Itoa(2), - configuration.GangMinimumCardinalityAnnotation: strconv.Itoa(1), - }, - }, - }, - expectSuccess: true, - }, "empty gangId": { jobRequests: []*api.JobSubmitRequestItem{ { @@ -727,6 +716,40 @@ func TestValidatePriorityClasses(t *testing.T) { } } +func TestValidateClientId(t *testing.T) { + tests := map[string]struct { + req *api.JobSubmitRequestItem + expectSuccess bool + }{ + "no client id": { + req: &api.JobSubmitRequestItem{}, + expectSuccess: true, + }, + "client id of 100 chars is fine": { + req: &api.JobSubmitRequestItem{ + ClientId: strings.Repeat("a", 100), + }, + expectSuccess: true, + }, + "client id over 100 chars is forbidden": { + req: &api.JobSubmitRequestItem{ + ClientId: strings.Repeat("a", 101), + }, + expectSuccess: false, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + err := validateClientId(tc.req, configuration.SubmissionConfig{}) + if tc.expectSuccess { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } +} + func TestValidateQueue(t *testing.T) { tests := map[string]struct { req *api.JobSubmitRequest diff --git a/internal/armadactl/analyze.go b/internal/armadactl/analyze.go deleted file mode 100644 index 650c0861684..00000000000 --- a/internal/armadactl/analyze.go +++ /dev/null @@ -1,49 +0,0 @@ -package armadactl - -import ( - "encoding/json" - "fmt" - "reflect" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/client" - "github.com/armadaproject/armada/pkg/client/domain" -) - -func (a *App) Analyze(queue string, jobSetId string) error { - fmt.Fprintf(a.Out, "Querying queue %s for job set %s\n", queue, jobSetId) - return client.WithEventClient(a.Params.ApiConnectionDetails, func(ec api.EventClient) error { - events := map[string][]*api.Event{} - var jobState *domain.WatchContext - - client.WatchJobSet(ec, queue, jobSetId, false, true, false, false, armadacontext.Background(), func(state *domain.WatchContext, e api.Event) bool { - events[e.GetJobId()] = 
append(events[e.GetJobId()], &e) - jobState = state - return false - }) - - if jobState == nil { - fmt.Fprintf(a.Out, "Found no events associated with job set %s in queue %s/n", jobSetId, queue) - return nil - } - - for id, jobInfo := range jobState.GetCurrentState() { - if jobInfo.Status != domain.Succeeded { - jobEvents := events[id] - - fmt.Fprintf(a.Out, "\n") - for _, e := range jobEvents { - data, err := json.Marshal(e) - if err != nil { - fmt.Fprintf(a.Out, "Error marshalling JSON: %s\n", err) - } else { - fmt.Fprintf(a.Out, "%s %s\n", reflect.TypeOf(*e), string(data)) - } - } - fmt.Fprintf(a.Out, "\n") - } - } - return nil - }) -} diff --git a/internal/armadactl/cancel.go b/internal/armadactl/cancel.go index 55a08b2d37e..39cf37b19ae 100644 --- a/internal/armadactl/cancel.go +++ b/internal/armadactl/cancel.go @@ -11,9 +11,7 @@ import ( "github.com/armadaproject/armada/pkg/client" ) -// Cancel cancels a job. -// TODO this method does too much; there should be separate methods to cancel individual jobs and all jobs in a job set -func (a *App) Cancel(queue string, jobSetId string, jobId string) (outerErr error) { +func (a *App) CancelJob(queue string, jobSetId string, jobId string) (outerErr error) { apiConnectionDetails := a.Params.ApiConnectionDetails fmt.Fprintf(a.Out, "Requesting cancellation of jobs matching queue: %s, job set: %s, and job ID: %s\n", queue, jobSetId, jobId) @@ -34,3 +32,24 @@ func (a *App) Cancel(queue string, jobSetId string, jobId string) (outerErr erro return nil }) } + +func (a *App) CancelJobSet(queue string, jobSetId string) (outerErr error) { + apiConnectionDetails := a.Params.ApiConnectionDetails + + fmt.Fprintf(a.Out, "Requesting cancellation of job set matching queue: %s, job set: %s\n", queue, jobSetId) + return client.WithSubmitClient(apiConnectionDetails, func(c api.SubmitClient) error { + ctx, cancel := common.ContextWithDefaultTimeout() + defer cancel() + + _, err := c.CancelJobSet(ctx, &api.JobSetCancelRequest{ + JobSetId: jobSetId, + Queue: queue, + }) + if err != nil { + return errors.Wrapf(err, "error cancelling job set matching queue: %s, job set: %s", queue, jobSetId) + } + + fmt.Fprintf(a.Out, "Requested cancellation for job set %s\n", jobSetId) + return nil + }) +} diff --git a/internal/armadactl/kube.go b/internal/armadactl/kube.go deleted file mode 100644 index d9b63a0399a..00000000000 --- a/internal/armadactl/kube.go +++ /dev/null @@ -1,35 +0,0 @@ -package armadactl - -import ( - "fmt" - "strings" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/client" -) - -// Kube prints kubectl commands for querying the pods associated with a particular job identified by -// the given jobId, queueName, jobSetId, and podNumber. 
-func (a *App) Kube(jobId string, queueName string, jobSetId string, podNumber int, args []string) error { - verb := strings.Join(args, " ") - return client.WithEventClient(a.Params.ApiConnectionDetails, func(c api.EventClient) error { - state := client.GetJobSetState(c, queueName, jobSetId, armadacontext.Background(), true, false, false) - jobInfo := state.GetJobInfo(jobId) - - if jobInfo == nil { - fmt.Fprintf(a.Out, "Could not find job %s.\n", jobId) - return nil - } - - if jobInfo.ClusterId == "" { - fmt.Fprintf(a.Out, "Job %s has not been assigned to a cluster yet.\n", jobId) - return nil - } - - cmd := client.GetKubectlCommand(jobInfo.ClusterId, jobInfo.Job.Namespace, jobId, podNumber, verb) - fmt.Fprintf(a.Out, "%s\n", cmd) - return nil - }) - return nil -} diff --git a/internal/armadactl/reprioritize.go b/internal/armadactl/reprioritize.go index 4c4c94e3ca6..652d268950f 100644 --- a/internal/armadactl/reprioritize.go +++ b/internal/armadactl/reprioritize.go @@ -10,9 +10,33 @@ import ( "github.com/armadaproject/armada/pkg/client" ) -// Reprioritize sets the priority of the job identified by (jobId, queueName, jobSet) to priorityFactor -// TODO We should have separate methods to operate on individual jobs and job sets -func (a *App) Reprioritize(jobId string, queueName string, jobSet string, priorityFactor float64) error { +// ReprioritizeJobSet sets the priority of the jobSet identified by (queueName, jobSet) to priorityFactor +func (a *App) ReprioritizeJobSet(queueName string, jobSet string, priorityFactor float64) error { + return client.WithSubmitClient(a.Params.ApiConnectionDetails, func(c api.SubmitClient) error { + ctx, cancel := common.ContextWithDefaultTimeout() + defer cancel() + + req := api.JobReprioritizeRequest{ + JobSetId: jobSet, + Queue: queueName, + NewPriority: priorityFactor, + } + result, err := c.ReprioritizeJobs(ctx, &req) + if err != nil { + return errors.WithMessagef(err, "error reprioritising jobs matching queue: %s, job set: %s\n", queueName, jobSet) + } + + err = a.writeResults(result.ReprioritizationResults) + if err != nil { + return err + } + + return nil + }) +} + +// Reprioritize sets the priority of the job identified by (jobId) to priorityFactor +func (a *App) ReprioritizeJob(queue string, jobSet string, jobId string, priorityFactor float64) error { return client.WithSubmitClient(a.Params.ApiConnectionDetails, func(c api.SubmitClient) error { var jobIds []string if jobId != "" { @@ -23,14 +47,14 @@ func (a *App) Reprioritize(jobId string, queueName string, jobSet string, priori defer cancel() req := api.JobReprioritizeRequest{ - JobIds: jobIds, + Queue: queue, JobSetId: jobSet, - Queue: queueName, + JobIds: jobIds, NewPriority: priorityFactor, } result, err := c.ReprioritizeJobs(ctx, &req) if err != nil { - return errors.WithMessagef(err, "error reprioritising jobs matching queue: %s, job set: %s, and job ID: %s\n", queueName, jobSet, jobId) + return errors.WithMessagef(err, "error reprioritising jobs matching job ID: %s\n", jobId) } err = a.writeResults(result.ReprioritizationResults) diff --git a/internal/armadactl/resources.go b/internal/armadactl/resources.go deleted file mode 100644 index 8a7f018bc0d..00000000000 --- a/internal/armadactl/resources.go +++ /dev/null @@ -1,21 +0,0 @@ -package armadactl - -import ( - "fmt" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/client" -) - -// Resources prints the resources used by the jobs in job set 
with ID jobSetId in the given queue. -func (a *App) Resources(queueName string, jobSetId string) error { - return client.WithEventClient(a.Params.ApiConnectionDetails, func(c api.EventClient) error { - state := client.GetJobSetState(c, queueName, jobSetId, armadacontext.Background(), true, false, false) - - for _, job := range state.GetCurrentState() { - fmt.Fprintf(a.Out, "Job ID: %v, maximum used resources: %v\n", job.Job.Id, job.MaxUsedResources) - } - return nil - }) -} diff --git a/internal/binoculars/server.go b/internal/binoculars/server.go index 07ded516fa9..bf14abc18f2 100644 --- a/internal/binoculars/server.go +++ b/internal/binoculars/server.go @@ -11,7 +11,6 @@ import ( "github.com/armadaproject/armada/internal/binoculars/server" "github.com/armadaproject/armada/internal/binoculars/service" "github.com/armadaproject/armada/internal/common/auth" - "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/cluster" grpcCommon "github.com/armadaproject/armada/internal/common/grpc" "github.com/armadaproject/armada/pkg/api/binoculars" @@ -39,7 +38,7 @@ func StartUp(config *configuration.BinocularsConfig) (func(), *sync.WaitGroup) { grpcServer := grpcCommon.CreateGrpcServer(config.Grpc.KeepaliveParams, config.Grpc.KeepaliveEnforcementPolicy, authServices, config.Grpc.Tls) - permissionsChecker := authorization.NewPrincipalPermissionChecker( + permissionsChecker := auth.NewPrincipalPermissionChecker( config.Auth.PermissionGroupMapping, config.Auth.PermissionScopeMapping, config.Auth.PermissionClaimMapping, diff --git a/internal/binoculars/server/binoculars.go b/internal/binoculars/server/binoculars.go index 0a08237058f..0a11c59cdb5 100644 --- a/internal/binoculars/server/binoculars.go +++ b/internal/binoculars/server/binoculars.go @@ -9,7 +9,7 @@ import ( "github.com/armadaproject/armada/internal/binoculars/service" "github.com/armadaproject/armada/internal/common" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/pkg/api/binoculars" ) @@ -26,7 +26,7 @@ func NewBinocularsServer(logService service.LogService, cordonService service.Co } func (b *BinocularsServer) Logs(ctx context.Context, request *binoculars.LogRequest) (*binoculars.LogResponse, error) { - principal := authorization.GetPrincipal(ctx) + principal := auth.GetPrincipal(ctx) logLines, err := b.logService.GetLogs(armadacontext.FromGrpcCtx(ctx), &service.LogParams{ Principal: principal, diff --git a/internal/binoculars/service/cordon.go b/internal/binoculars/service/cordon.go index 584da9bf4ca..9456c3cf878 100644 --- a/internal/binoculars/service/cordon.go +++ b/internal/binoculars/service/cordon.go @@ -14,7 +14,7 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/binoculars/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/internal/common/cluster" "github.com/armadaproject/armada/pkg/api/binoculars" @@ -28,13 +28,13 @@ type CordonService interface { type KubernetesCordonService struct { clientProvider cluster.KubernetesClientProvider - permissionChecker 
authorization.PermissionChecker + permissionChecker auth.PermissionChecker config configuration.CordonConfiguration } func NewKubernetesCordonService( cordonConfig configuration.CordonConfiguration, - permissionsChecker authorization.PermissionChecker, + permissionsChecker auth.PermissionChecker, clientProvider cluster.KubernetesClientProvider, ) *KubernetesCordonService { return &KubernetesCordonService{ @@ -50,7 +50,7 @@ func (c *KubernetesCordonService) CordonNode(ctx *armadacontext.Context, request return status.Errorf(codes.PermissionDenied, err.Error()) } - additionalLabels := templateLabels(c.config.AdditionalLabels, authorization.GetPrincipal(ctx).GetName()) + additionalLabels := templateLabels(c.config.AdditionalLabels, auth.GetPrincipal(ctx).GetName()) patch := createCordonPatch(additionalLabels) patchBytes, err := GetPatchBytes(patch) @@ -91,9 +91,9 @@ func GetPatchBytes(patchData *nodePatch) ([]byte, error) { return json.Marshal(patchData) } -func checkPermission(p authorization.PermissionChecker, ctx *armadacontext.Context, permission permission.Permission) error { +func checkPermission(p auth.PermissionChecker, ctx *armadacontext.Context, permission permission.Permission) error { if !p.UserHasPermission(ctx, permission) { - return fmt.Errorf("user %s does not have permission %s", authorization.GetPrincipal(ctx).GetName(), permission) + return fmt.Errorf("user %s does not have permission %s", auth.GetPrincipal(ctx).GetName(), permission) } return nil } diff --git a/internal/binoculars/service/cordon_test.go b/internal/binoculars/service/cordon_test.go index eadac72fd8e..2168fc3771e 100644 --- a/internal/binoculars/service/cordon_test.go +++ b/internal/binoculars/service/cordon_test.go @@ -21,7 +21,7 @@ import ( "github.com/armadaproject/armada/internal/binoculars/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/pkg/api/binoculars" ) @@ -38,7 +38,7 @@ var ( ) func TestCordonNode(t *testing.T) { - principal := authorization.NewStaticPrincipal("principle", []string{}) + principal := auth.NewStaticPrincipal("principle", []string{}) tests := map[string]struct { additionalLabels map[string]string expectedLabels map[string]string @@ -80,7 +80,7 @@ func TestCordonNode(t *testing.T) { } cordonService, client := setupTest(t, cordonConfig, FakePermissionChecker{ReturnValue: true}) - ctx := authorization.WithPrincipal(context.Background(), principal) + ctx := auth.WithPrincipal(context.Background(), principal) err := cordonService.CordonNode(armadacontext.New(ctx, logrus.NewEntry(logrus.New())), &binoculars.CordonRequest{ NodeName: defaultNode.Name, }) @@ -129,7 +129,7 @@ func TestCordonNode_Unauthenticated(t *testing.T) { assert.Equal(t, statusError.Code(), codes.PermissionDenied) } -func setupTest(t *testing.T, config configuration.CordonConfiguration, permissionChecker authorization.PermissionChecker) (CordonService, *fake.Clientset) { +func setupTest(t *testing.T, config configuration.CordonConfiguration, permissionChecker auth.PermissionChecker) (CordonService, *fake.Clientset) { client := fake.NewSimpleClientset() clientProvider := &FakeClientProvider{FakeClient: client} @@ -145,7 +145,7 @@ type FakePermissionChecker struct { ReturnValue bool } -func (c FakePermissionChecker) UserOwns(ctx context.Context, obj authorization.Owned) 
(owned bool, ownershipGroups []string) { +func (c FakePermissionChecker) UserOwns(ctx context.Context, obj auth.Owned) (owned bool, ownershipGroups []string) { return c.ReturnValue, []string{} } diff --git a/internal/binoculars/service/logs.go b/internal/binoculars/service/logs.go index ac72215f67e..8bc0f38d1be 100644 --- a/internal/binoculars/service/logs.go +++ b/internal/binoculars/service/logs.go @@ -10,7 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/cluster" "github.com/armadaproject/armada/pkg/api/binoculars" ) @@ -20,7 +20,7 @@ type LogService interface { } type LogParams struct { - Principal authorization.Principal + Principal auth.Principal Namespace string PodName string SinceTime string diff --git a/internal/common/armadacontext/armada_context.go b/internal/common/armadacontext/armada_context.go index ae70b48ffcf..665c19c51f9 100644 --- a/internal/common/armadacontext/armada_context.go +++ b/internal/common/armadacontext/armada_context.go @@ -20,7 +20,7 @@ type Context struct { func Background() *Context { return &Context{ Context: context.Background(), - FieldLogger: logrus.NewEntry(logrus.New()), + FieldLogger: logrus.NewEntry(logrus.StandardLogger()), } } @@ -28,7 +28,7 @@ func Background() *Context { func TODO() *Context { return &Context{ Context: context.TODO(), - FieldLogger: logrus.NewEntry(logrus.New()), + FieldLogger: logrus.NewEntry(logrus.StandardLogger()), } } diff --git a/internal/common/auth/authorization/anonymous.go b/internal/common/auth/anonymous.go similarity index 91% rename from internal/common/auth/authorization/anonymous.go rename to internal/common/auth/anonymous.go index 2043cfb7bf3..dfeeb126143 100644 --- a/internal/common/auth/authorization/anonymous.go +++ b/internal/common/auth/anonymous.go @@ -1,4 +1,4 @@ -package authorization +package auth import "context" diff --git a/internal/armada/server/authorization.go b/internal/common/auth/authorization.go similarity index 84% rename from internal/armada/server/authorization.go rename to internal/common/auth/authorization.go index d3d24f646ff..848332cb2ae 100644 --- a/internal/armada/server/authorization.go +++ b/internal/common/auth/authorization.go @@ -1,11 +1,10 @@ -package server +package auth import ( "fmt" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/pkg/client/queue" ) @@ -16,17 +15,17 @@ type ActionAuthorizer interface { } type Authorizer struct { - permissionChecker authorization.PermissionChecker + permissionChecker PermissionChecker } -func NewAuthorizer(permissionChecker authorization.PermissionChecker) *Authorizer { +func NewAuthorizer(permissionChecker PermissionChecker) *Authorizer { return &Authorizer{ permissionChecker: permissionChecker, } } func (b *Authorizer) AuthorizeAction(ctx *armadacontext.Context, perm permission.Permission) error { - principal := authorization.GetPrincipal(ctx) + principal := GetPrincipal(ctx) if !b.permissionChecker.UserHasPermission(ctx, perm) { return &armadaerrors.ErrUnauthorized{ Principal: principal.GetName(), @@ -44,7 +43,7 @@ func (b *Authorizer) 
AuthorizeQueueAction( anyPerm permission.Permission, perm queue.PermissionVerb, ) error { - principal := authorization.GetPrincipal(ctx) + principal := GetPrincipal(ctx) hasAnyPerm := b.permissionChecker.UserHasPermission(ctx, anyPerm) hasQueuePerm := principalHasQueuePermissions(principal, queue, perm) if !hasAnyPerm && !hasQueuePerm { @@ -63,7 +62,7 @@ func (b *Authorizer) AuthorizeQueueAction( // principalHasQueuePermissions returns true if the principal has permissions to perform some action, // as specified by the provided verb, for a specific queue, and false otherwise. -func principalHasQueuePermissions(principal authorization.Principal, q queue.Queue, verb queue.PermissionVerb) bool { +func principalHasQueuePermissions(principal Principal, q queue.Queue, verb queue.PermissionVerb) bool { subjects := queue.PermissionSubjects{} for _, group := range principal.GetGroupNames() { subjects = append(subjects, queue.PermissionSubject{ diff --git a/internal/armada/server/authorization_test.go b/internal/common/auth/authorization_test.go similarity index 65% rename from internal/armada/server/authorization_test.go rename to internal/common/auth/authorization_test.go index 3cbbdc45c2c..002d045a50f 100644 --- a/internal/armada/server/authorization_test.go +++ b/internal/common/auth/authorization_test.go @@ -1,4 +1,4 @@ -package server +package auth import ( "context" @@ -9,7 +9,6 @@ import ( "github.com/armadaproject/armada/internal/armada/permissions" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/permission" "github.com/armadaproject/armada/pkg/client/queue" ) @@ -72,8 +71,8 @@ func TestAuthorizer_AuthorizeQueueAction(t *testing.T) { PriorityFactor: 1, } - authorizedPrincipal := authorization.NewStaticPrincipal("alice", []string{"submit-job-group"}) - unauthorizedPrincipcal := authorization.NewStaticPrincipal("alice", []string{}) + authorizedPrincipal := NewStaticPrincipal("alice", []string{"submit-job-group"}) + unauthorizedPrincipcal := NewStaticPrincipal("alice", []string{}) tests := map[string]struct { ctx *armadacontext.Context @@ -81,22 +80,22 @@ func TestAuthorizer_AuthorizeQueueAction(t *testing.T) { expectAuthorized bool }{ "no permissions": { - ctx: armadacontext.FromGrpcCtx(authorization.WithPrincipal(context.Background(), unauthorizedPrincipcal)), + ctx: armadacontext.FromGrpcCtx(WithPrincipal(context.Background(), unauthorizedPrincipcal)), permissionCheckerResult: false, expectAuthorized: false, }, "only has global permission": { - ctx: armadacontext.FromGrpcCtx(authorization.WithPrincipal(context.Background(), unauthorizedPrincipcal)), + ctx: armadacontext.FromGrpcCtx(WithPrincipal(context.Background(), unauthorizedPrincipcal)), permissionCheckerResult: true, expectAuthorized: true, }, "only has queue permission": { - ctx: armadacontext.FromGrpcCtx(authorization.WithPrincipal(context.Background(), authorizedPrincipal)), + ctx: armadacontext.FromGrpcCtx(WithPrincipal(context.Background(), authorizedPrincipal)), permissionCheckerResult: false, expectAuthorized: true, }, "has both queue and global permissions": { - ctx: armadacontext.FromGrpcCtx(authorization.WithPrincipal(context.Background(), authorizedPrincipal)), + ctx: armadacontext.FromGrpcCtx(WithPrincipal(context.Background(), authorizedPrincipal)), permissionCheckerResult: true, expectAuthorized: true, }, @@ -115,47 
+114,11 @@ func TestAuthorizer_AuthorizeQueueAction(t *testing.T) { } } -type FakeActionAuthorizer struct{} - -func (c *FakeActionAuthorizer) AuthorizeAction(ctx *armadacontext.Context, anyPerm permission.Permission) error { - return nil -} - -func (c *FakeActionAuthorizer) AuthorizeQueueAction( - ctx *armadacontext.Context, - queue queue.Queue, - anyPerm permission.Permission, - perm queue.PermissionVerb, -) error { - return nil -} - -type FakeDenyAllActionAuthorizer struct{} - -func (c *FakeDenyAllActionAuthorizer) AuthorizeAction(ctx *armadacontext.Context, anyPerm permission.Permission) error { - return &armadaerrors.ErrUnauthorized{ - Principal: authorization.GetPrincipal(ctx).GetName(), - Message: "permission denied", - } -} - -func (c *FakeDenyAllActionAuthorizer) AuthorizeQueueAction( - ctx *armadacontext.Context, - queue queue.Queue, - anyPerm permission.Permission, - perm queue.PermissionVerb, -) error { - return &armadaerrors.ErrUnauthorized{ - Principal: authorization.GetPrincipal(ctx).GetName(), - Message: "permission denied", - } -} - type FakePermissionChecker struct { ReturnValue bool } -func (c FakePermissionChecker) UserOwns(ctx context.Context, obj authorization.Owned) (owned bool, ownershipGroups []string) { +func (c FakePermissionChecker) UserOwns(ctx context.Context, obj Owned) (owned bool, ownershipGroups []string) { return c.ReturnValue, []string{} } diff --git a/internal/common/auth/authorization/basic.go b/internal/common/auth/basic.go similarity index 98% rename from internal/common/auth/authorization/basic.go rename to internal/common/auth/basic.go index 2c44cb5414e..dac30220fdd 100644 --- a/internal/common/auth/authorization/basic.go +++ b/internal/common/auth/basic.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/basic_test.go b/internal/common/auth/basic_test.go similarity index 98% rename from internal/common/auth/authorization/basic_test.go rename to internal/common/auth/basic_test.go index 2a3ad16eb09..bed9f3a6443 100644 --- a/internal/common/auth/authorization/basic_test.go +++ b/internal/common/auth/basic_test.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/common.go b/internal/common/auth/common.go similarity index 99% rename from internal/common/auth/authorization/common.go rename to internal/common/auth/common.go index be1861f2759..97fd664a6fe 100644 --- a/internal/common/auth/authorization/common.go +++ b/internal/common/auth/common.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/common_test.go b/internal/common/auth/common_test.go similarity index 98% rename from internal/common/auth/authorization/common_test.go rename to internal/common/auth/common_test.go index 5d4d451248e..43edf1e9a8f 100644 --- a/internal/common/auth/authorization/common_test.go +++ b/internal/common/auth/common_test.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/kubernetes.go b/internal/common/auth/kubernetes.go similarity index 99% rename from internal/common/auth/authorization/kubernetes.go rename to internal/common/auth/kubernetes.go index 43778f1e009..c81c9da9779 100644 --- a/internal/common/auth/authorization/kubernetes.go +++ b/internal/common/auth/kubernetes.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" @@ -9,14 +9,13 @@ import ( "strings" "time" - 
"k8s.io/apimachinery/pkg/util/clock" - "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" "github.com/patrickmn/go-cache" authv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadaerrors" "github.com/armadaproject/armada/internal/common/auth/configuration" diff --git a/internal/common/auth/authorization/kubernetes_test.go b/internal/common/auth/kubernetes_test.go similarity index 98% rename from internal/common/auth/authorization/kubernetes_test.go rename to internal/common/auth/kubernetes_test.go index eef827f9add..d178a8390a3 100644 --- a/internal/common/auth/authorization/kubernetes_test.go +++ b/internal/common/auth/kubernetes_test.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" @@ -13,7 +13,7 @@ import ( "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" authv1 "k8s.io/api/authentication/v1" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/armadaproject/armada/internal/common/auth/configuration" ) diff --git a/internal/common/auth/authorization/oidc.go b/internal/common/auth/oidc.go similarity index 99% rename from internal/common/auth/authorization/oidc.go rename to internal/common/auth/oidc.go index 9e88a21c5f9..b0219f8732e 100644 --- a/internal/common/auth/authorization/oidc.go +++ b/internal/common/auth/oidc.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/oidc_test.go b/internal/common/auth/oidc_test.go similarity index 98% rename from internal/common/auth/authorization/oidc_test.go rename to internal/common/auth/oidc_test.go index 3ceda1b4238..f79c4d0a1eb 100644 --- a/internal/common/auth/authorization/oidc_test.go +++ b/internal/common/auth/oidc_test.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/permissions.go b/internal/common/auth/permissions.go similarity index 99% rename from internal/common/auth/authorization/permissions.go rename to internal/common/auth/permissions.go index f0b905c9ad8..0c591da44b2 100644 --- a/internal/common/auth/authorization/permissions.go +++ b/internal/common/auth/permissions.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/authorization/permissions_test.go b/internal/common/auth/permissions_test.go similarity index 99% rename from internal/common/auth/authorization/permissions_test.go rename to internal/common/auth/permissions_test.go index b4e53cdc026..9590c5e0896 100644 --- a/internal/common/auth/authorization/permissions_test.go +++ b/internal/common/auth/permissions_test.go @@ -1,4 +1,4 @@ -package authorization +package auth import ( "context" diff --git a/internal/common/auth/setup.go b/internal/common/auth/setup.go index 69e9b106096..f2a05da9a8b 100644 --- a/internal/common/auth/setup.go +++ b/internal/common/auth/setup.go @@ -5,25 +5,24 @@ import ( "github.com/pkg/errors" - "github.com/armadaproject/armada/internal/common/auth/authorization" "github.com/armadaproject/armada/internal/common/auth/configuration" ) -func ConfigureAuth(config configuration.AuthConfig) ([]authorization.AuthService, error) { - var authServices []authorization.AuthService +func ConfigureAuth(config configuration.AuthConfig) ([]AuthService, error) { + var authServices []AuthService if 
len(config.BasicAuth.Users) > 0 { authServices = append(authServices, - authorization.NewBasicAuthService(config.BasicAuth.Users)) + NewBasicAuthService(config.BasicAuth.Users)) } if config.KubernetesAuth.KidMappingFileLocation != "" { - kubernetesAuthService := authorization.NewKubernetesNativeAuthService(config.KubernetesAuth) + kubernetesAuthService := NewKubernetesNativeAuthService(config.KubernetesAuth) authServices = append(authServices, &kubernetesAuthService) } if config.OpenIdAuth.ProviderUrl != "" { - openIdAuthService, err := authorization.NewOpenIdAuthServiceForProvider(context.Background(), &config.OpenIdAuth) + openIdAuthService, err := NewOpenIdAuthServiceForProvider(context.Background(), &config.OpenIdAuth) if err != nil { return nil, errors.WithMessage(err, "error initialising openId auth") } @@ -31,7 +30,7 @@ func ConfigureAuth(config configuration.AuthConfig) ([]authorization.AuthService } if config.AnonymousAuth { - authServices = append(authServices, &authorization.AnonymousAuthService{}) + authServices = append(authServices, &AnonymousAuthService{}) } if len(authServices) == 0 { diff --git a/internal/common/database/lookout/jobstates.go b/internal/common/database/lookout/jobstates.go index b3df64386c1..3c97e00cb77 100644 --- a/internal/common/database/lookout/jobstates.go +++ b/internal/common/database/lookout/jobstates.go @@ -36,6 +36,7 @@ const ( JobRunLeaseExpired JobRunState = "RUN_LEASE_EXPIRED" JobRunMaxRunsExceeded JobRunState = "RUN_MAX_RUNS_EXCEEDED" JobRunLeased JobRunState = "RUN_LEASED" + JobRunCancelled JobRunState = "RUN_CANCELLED" JobRunPendingOrdinal = 1 JobRunRunningOrdinal = 2 @@ -48,6 +49,7 @@ const ( JobRunLeaseExpiredOrdinal = 9 JobRunMaxRunsExceededOrdinal = 10 JobRunLeasedOrdinal = 11 + JobRunCancelledOrdinal = 12 ) var ( @@ -82,6 +84,7 @@ var ( JobRunRunningOrdinal: JobRunRunning, JobRunSucceededOrdinal: JobRunSucceeded, JobRunFailedOrdinal: JobRunFailed, + JobRunCancelledOrdinal: JobRunCancelled, JobRunTerminatedOrdinal: JobRunTerminated, JobRunPreemptedOrdinal: JobRunPreempted, JobRunUnableToScheduleOrdinal: JobRunUnableToSchedule, diff --git a/internal/common/eventutil/eventutil.go b/internal/common/eventutil/eventutil.go index ec2e8230ee3..c83786f868f 100644 --- a/internal/common/eventutil/eventutil.go +++ b/internal/common/eventutil/eventutil.go @@ -2,7 +2,6 @@ package eventutil import ( "fmt" - "math" "time" "github.com/gogo/protobuf/proto" @@ -15,10 +14,6 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/internal/executor/configuration" - "github.com/armadaproject/armada/internal/executor/domain" - executorutil "github.com/armadaproject/armada/internal/executor/util" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -84,26 +79,6 @@ func ShortSequenceString(sequence *armadaevents.EventSequence) string { return s } -// ApiJobsFromLogSubmitJobs converts a slice of log jobs to API jobs. 
-func ApiJobsFromLogSubmitJobs( - userId string, - groups []string, - queueName string, - jobSetName string, - time time.Time, - es []*armadaevents.SubmitJob, -) ([]*api.Job, error) { - jobs := make([]*api.Job, len(es)) - for i, e := range es { - job, err := ApiJobFromLogSubmitJob(userId, groups, queueName, jobSetName, time, e) - if err != nil { - return nil, err - } - jobs[i] = job - } - return jobs, nil -} - // ApiJobFromLogSubmitJob converts a SubmitJob log message into an api.Job struct, which is used by Armada internally. func ApiJobFromLogSubmitJob(ownerId string, groups []string, queueName string, jobSetName string, time time.Time, e *armadaevents.SubmitJob) (*api.Job, error) { jobId, err := armadaevents.UlidStringFromProtoUuid(e.JobId) @@ -190,176 +165,9 @@ func ApiJobFromLogSubmitJob(ownerId string, groups []string, queueName string, j Created: time, Owner: ownerId, QueueOwnershipUserGroups: groups, - QueueTtlSeconds: e.QueueTtlSeconds, }, nil } -// LogSubmitJobFromApiJob converts an API job to a log job. -// Note that PopulateK8sServicesIngresses must be called first if job.Services and job.Ingress -// is to be included in the resulting log job, since the log job can only include k8s objects -// (i.e., not the API-specific job.Services or job.Ingress). -func LogSubmitJobFromApiJob(job *api.Job) (*armadaevents.SubmitJob, error) { - if job.PodSpec != nil && len(job.PodSpecs) != 0 { - return nil, errors.WithStack(&armadaerrors.ErrInvalidArgument{ - Name: "PodSpecs", - Value: job.PodSpecs, - Message: "Both PodSpec and PodSpecs are set", - }) - } - jobId, err := armadaevents.ProtoUuidFromUlidString(job.GetId()) - if err != nil { - return nil, err - } - priority := LogSubmitPriorityFromApiPriority(job.GetPriority()) - mainObject, objects, err := LogSubmitObjectsFromApiJob(job) - if err != nil { - return nil, err - } - return &armadaevents.SubmitJob{ - JobId: jobId, - DeduplicationId: job.GetClientId(), - Priority: priority, - ObjectMeta: &armadaevents.ObjectMeta{ - ExecutorId: "", // Not set by the job - Namespace: job.GetNamespace(), - Annotations: job.GetAnnotations(), - Labels: job.GetLabels(), - }, - MainObject: mainObject, - Objects: objects, - Scheduler: job.Scheduler, - QueueTtlSeconds: job.QueueTtlSeconds, - }, nil -} - -// LogSubmitObjectsFromApiJob extracts all objects from an API job for inclusion in a log job. -// -// To extract services and ingresses, PopulateK8sServicesIngresses must be called on the job first -// to convert API-specific job objects to proper K8s objects. -func LogSubmitObjectsFromApiJob(job *api.Job) (*armadaevents.KubernetesMainObject, []*armadaevents.KubernetesObject, error) { - // Objects part of the job in addition to the main object. - objects := make([]*armadaevents.KubernetesObject, 0, len(job.Services)+len(job.Ingress)+len(job.PodSpecs)) - - // Each job has a main object associated with it, which determines when the job exits. - // If provided, use job.PodSpec as the main object. Otherwise, try to use job.PodSpecs[0]. - mainPodSpec := job.PodSpec - additionalPodSpecs := job.PodSpecs - if additionalPodSpecs == nil { - additionalPodSpecs = make([]*v1.PodSpec, 0) - } - if mainPodSpec == nil && len(additionalPodSpecs) > 0 { - mainPodSpec = additionalPodSpecs[0] - additionalPodSpecs = additionalPodSpecs[1:] - } - - // Job must contain at least one podspec. 
- if mainPodSpec == nil { - err := errors.WithStack(&armadaerrors.ErrInvalidArgument{ - Name: "PodSpec", - Value: nil, - Message: "job doesn't contain any podspecs", - }) - return nil, nil, err - } - - mainObject := &armadaevents.KubernetesMainObject{ - Object: &armadaevents.KubernetesMainObject_PodSpec{ - PodSpec: &armadaevents.PodSpecWithAvoidList{ - PodSpec: mainPodSpec, - }, - }, - } - - // Collect all additional objects. - for _, podSpec := range additionalPodSpecs { - objects = append(objects, &armadaevents.KubernetesObject{ - Object: &armadaevents.KubernetesObject_PodSpec{ - PodSpec: &armadaevents.PodSpecWithAvoidList{ - PodSpec: podSpec, - }, - }, - }) - } - for _, service := range job.K8SService { - objects = append(objects, &armadaevents.KubernetesObject{ - ObjectMeta: LogObjectMetaFromK8sObjectMeta(&service.ObjectMeta), - Object: &armadaevents.KubernetesObject_Service{ - Service: &service.Spec, - }, - }) - } - for _, ingress := range job.K8SIngress { - objects = append(objects, &armadaevents.KubernetesObject{ - ObjectMeta: LogObjectMetaFromK8sObjectMeta(&ingress.ObjectMeta), - Object: &armadaevents.KubernetesObject_Ingress{ - Ingress: &ingress.Spec, - }, - }) - } - - return mainObject, objects, nil -} - -// PopulateK8sServicesIngresses converts the API-specific service and ingress object into K8s objects -// and stores those in the job object. -func PopulateK8sServicesIngresses(job *api.Job, ingressConfig *configuration.IngressConfiguration) error { - services, ingresses, err := K8sServicesIngressesFromApiJob(job, ingressConfig) - if err != nil { - return err - } - job.K8SService = services - job.K8SIngress = ingresses - return nil -} - -// K8sServicesIngressesFromApiJob converts job.Services and job.Ingress to k8s services and ingresses. -func K8sServicesIngressesFromApiJob(job *api.Job, ingressConfig *configuration.IngressConfiguration) ([]*v1.Service, []*networking.Ingress, error) { - // GenerateIngresses (below) looks into the pod to set names for the services/ingresses. - // Hence, we use the same code as is later used by the executor to create the pod to be submitted. - // Note that we only create the pod here to pass it to GenerateIngresses. - // TODO: This only works for a single pod; I think we should create services/ingresses for each pod in the request (Albin). - pod := executorutil.CreatePod(job, &configuration.PodDefaults{}) - pod.Annotations = util.MergeMaps(pod.Annotations, map[string]string{ - domain.HasIngress: "true", - domain.AssociatedServicesCount: fmt.Sprintf("%d", len(job.Services)), - domain.AssociatedIngressesCount: fmt.Sprintf("%d", len(job.Ingress)), - }) - - // Create k8s objects from the data embedded in the request. - // GenerateIngresses expects a job object and a pod because it looks into those for optimisations. - // For example, it deletes services/ingresses for which there are no corresponding ports exposed in the PodSpec. - // Note that the user may submit several pods, but we only pass in one of them as a separate argument. - // I think this may result in Armada deleting services/ingresses needed for pods other than the first one - // - Albin - services, ingresses := executorutil.GenerateIngresses(job, pod, ingressConfig) - - return services, ingresses, nil -} - -// LogSubmitPriorityFromApiPriority returns the uint32 representation of the priority included with a submitted job, -// or an error if the conversion fails. 
-func LogSubmitPriorityFromApiPriority(priority float64) uint32 { - if priority < 0 { - priority = 0 - } - if priority > math.MaxUint32 { - priority = math.MaxUint32 - } - priority = math.Round(priority) - return uint32(priority) -} - -func LogObjectMetaFromK8sObjectMeta(meta *metav1.ObjectMeta) *armadaevents.ObjectMeta { - return &armadaevents.ObjectMeta{ - ExecutorId: "", // Not part of the k8s ObjectMeta. - Namespace: meta.GetNamespace(), - Name: meta.GetName(), - KubernetesId: string(meta.GetUID()), // The type returned by GetUID is an alias of string. - Annotations: meta.GetAnnotations(), - Labels: meta.GetLabels(), - } -} - func K8sObjectMetaFromLogObjectMeta(meta *armadaevents.ObjectMeta) *metav1.ObjectMeta { return &metav1.ObjectMeta{ Namespace: meta.GetNamespace(), @@ -459,33 +267,39 @@ func LimitSequencesByteSize(sequences []*armadaevents.EventSequence, sizeInBytes return rv, nil } -// LimitSequenceByteSize returns a slice of sequences produced by breaking up sequence.Events -// into separate sequences, each of which is at most MAX_SEQUENCE_SIZE_IN_BYTES bytes in size. -func LimitSequenceByteSize(sequence *armadaevents.EventSequence, maxSequenceSizeInBytes uint, strict bool) ([]*armadaevents.EventSequence, error) { +// This is an (over)estimate of the byte overhead used to represent the list EventSequence.Events +// We need this get a safe estimate for the headerSize in LimitSequenceByteSize +// We cannot simply rely on proto.Size on an EventSequence with an empty Event list, +// as proto is smart enough to realise it is empty and just nils it out for 0 bytes +const sequenceEventListOverheadSizeBytes = 100 + +// LimitSequenceByteSize returns a slice of sequences produced by breaking up sequence.Events into separate sequences +// If strict is true, each sequence will be at most sizeInBytes bytes in size +// If strict is false, sizeInBytes can be exceeded by at most the size of a single sequence.Event +func LimitSequenceByteSize(sequence *armadaevents.EventSequence, sizeInBytes uint, strict bool) ([]*armadaevents.EventSequence, error) { // Compute the size of the sequence without events. 
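	// The envelope (queue, job set, user id, groups) is measured by temporarily
	// emptying sequence.Events and calling proto.Size on what remains; the
	// sequenceEventListOverheadSizeBytes margin is added because proto.Size
	// reports an empty repeated field as zero bytes. Events are then packed
	// greedily: a new output sequence is started whenever adding the next event
	// would push headerSize plus the accumulated event bytes over sizeInBytes.
	// In strict mode an event that cannot fit under the limit even on its own is
	// rejected; otherwise it is emitted in a sequence of its own, which may then
	// exceed the limit by the size of that single event.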
events := sequence.Events sequence.Events = make([]*armadaevents.EventSequence_Event, 0) + headerSize := uint(proto.Size(sequence)) + sequenceEventListOverheadSizeBytes sequence.Events = events - // var currentSequence *armadaevents.EventSequence sequences := make([]*armadaevents.EventSequence, 0, 1) + lastSequenceEventSize := uint(0) for _, event := range sequence.Events { - if len(sequences) == 0 { - sequences = append(sequences, &armadaevents.EventSequence{ - Queue: sequence.Queue, - JobSetName: sequence.JobSetName, - UserId: sequence.UserId, - Groups: sequence.Groups, - Events: nil, + eventSize := uint(proto.Size(event)) + if eventSize+headerSize > sizeInBytes && strict { + return nil, errors.WithStack(&armadaerrors.ErrInvalidArgument{ + Name: "sequence", + Value: sequence, + Message: fmt.Sprintf( + "event of %d bytes is too large, when combined with a header of size %d is larger than the sequence size limit of %d", + eventSize, + headerSize, + sizeInBytes, + ), }) } - lastSequence := sequences[len(sequences)-1] - lastSequence.Events = append(lastSequence.Events, event) - sequenceSizeInBytes := uint(proto.Size(lastSequence)) - - if sequenceSizeInBytes > maxSequenceSizeInBytes { - // Event makes sequence too large, remove event and make a new sequence - lastSequence.Events = lastSequence.Events[:len(lastSequence.Events)-1] + if len(sequences) == 0 || lastSequenceEventSize+eventSize+headerSize > sizeInBytes { sequences = append(sequences, &armadaevents.EventSequence{ Queue: sequence.Queue, JobSetName: sequence.JobSetName, @@ -493,35 +307,11 @@ func LimitSequenceByteSize(sequence *armadaevents.EventSequence, maxSequenceSize Groups: sequence.Groups, Events: nil, }) - - lastSequence = sequences[len(sequences)-1] - lastSequence.Events = append(lastSequence.Events, event) - sequenceSizeInBytes = uint(proto.Size(lastSequence)) - - if sequenceSizeInBytes > maxSequenceSizeInBytes && strict { - eventSize := uint(proto.Size(event)) - return nil, errors.WithStack(&armadaerrors.ErrInvalidArgument{ - Name: "sequence", - Value: sequence, - Message: fmt.Sprintf( - "event of %d bytes is too large, preventing the creation of a sequence with size limit %d", - eventSize, - maxSequenceSizeInBytes, - ), - }) - } + lastSequenceEventSize = 0 } + lastSequence := sequences[len(sequences)-1] + lastSequence.Events = append(lastSequence.Events, event) + lastSequenceEventSize += eventSize } return sequences, nil } - -// LEGACY_RUN_ID is used for messages for which we can't use the kubernetesId. 
-const LEGACY_RUN_ID = "00000000-0000-0000-0000-000000000000" - -func LegacyJobRunId() *armadaevents.Uuid { - jobRunId, err := armadaevents.ProtoUuidFromUuidString(LEGACY_RUN_ID) - if err != nil { - panic(err) - } - return jobRunId -} diff --git a/internal/common/eventutil/eventutil_test.go b/internal/common/eventutil/eventutil_test.go index d92bd6cc182..e1015b00359 100644 --- a/internal/common/eventutil/eventutil_test.go +++ b/internal/common/eventutil/eventutil_test.go @@ -1,380 +1,15 @@ package eventutil import ( - "fmt" "testing" - "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - networking "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/internal/executor/configuration" - "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" ) -func TestConvertLogObjectMeta(t *testing.T) { - expected := &armadaevents.ObjectMeta{ - ExecutorId: "", // Can't be part of the test since the k8s ObjectMeta doesn't include it. - Namespace: "namespace", - Name: "name", - KubernetesId: "id", - Annotations: map[string]string{"annotation_1": "annotation_1", "annotation_2": "annotation_2"}, - Labels: map[string]string{"label_1": "label_1", "label_2": "label_2"}, - } - k8sObjectMeta := K8sObjectMetaFromLogObjectMeta(expected) - actual := LogObjectMetaFromK8sObjectMeta(k8sObjectMeta) - assert.Equal(t, expected, actual) -} - -func TestConvertK8sObjectMeta(t *testing.T) { - expected := &metav1.ObjectMeta{ - Namespace: "namespace", - Name: "name", - UID: "id", - Annotations: map[string]string{"annotation_1": "annotation_1", "annotation_2": "annotation_2"}, - Labels: map[string]string{"label_1": "label_1", "label_2": "label_2"}, - } - logObjectMeta := LogObjectMetaFromK8sObjectMeta(expected) - actual := K8sObjectMetaFromLogObjectMeta(logObjectMeta) - assert.Equal(t, expected, actual) -} - -func TestConvertJobErrors(t *testing.T) { - apiJob := testJob(false) - apiJob.PodSpec = nil - apiJob.PodSpecs = nil - _, err := LogSubmitJobFromApiJob(apiJob) - assert.Error(t, err) -} - -func TestK8sServicesIngressesFromApiJob(t *testing.T) { - apiJob := testJob(false) - ingressConfig := &configuration.IngressConfiguration{ - HostnameSuffix: "HostnameSuffix", - CertNameSuffix: "CertNameSuffix", - Annotations: map[string]string{"ingress_annotation_1": "ingress_annotation_1", "ingress_annotation_2": "ingress_annotation_2"}, - } - - services, ingresses, err := K8sServicesIngressesFromApiJob(apiJob, ingressConfig) - if !assert.NoError(t, err) { - t.FailNow() - } - - if !assert.Equal(t, 3, len(services)) { - t.FailNow() - } - - if !assert.Equal(t, 1, len(ingresses)) { - t.FailNow() - } - - expectedLabels := util.MergeMaps( - apiJob.Labels, - map[string]string{"armada_job_id": apiJob.GetId(), "armada_pod_number": "0", "armada_queue_id": apiJob.GetQueue()}, - ) - expectedServiceAnnotations := util.MergeMaps( - apiJob.Annotations, - map[string]string{"armada_jobset_id": apiJob.GetJobSetId(), "armada_owner": apiJob.GetOwner()}, - ) - - expectedServices := make(map[string]*v1.Service) - for _, suffix := range []string{"ingress", "nodeport", "headless"} { - name := fmt.Sprintf("armada-%s-0-%s", apiJob.GetId(), suffix) - port := int32(5000) - clusterIP := v1.ClusterIPNone - serviceType := v1.ServiceTypeClusterIP - if suffix == "nodeport" { - port = 6000 - clusterIP = "" - serviceType = 
v1.ServiceTypeNodePort - } else if suffix == "headless" { - port = 7000 - } - expectedServices[name] = &v1.Service{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("armada-%s-0-%s", apiJob.GetId(), suffix), - Namespace: apiJob.Namespace, - Labels: expectedLabels, - Annotations: expectedServiceAnnotations, - OwnerReferences: nil, - Finalizers: nil, - ManagedFields: nil, - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: fmt.Sprintf("%s_%s-%d", "podSpec", "container1", port), - Protocol: v1.ProtocolTCP, - Port: port, - }, - { - Name: fmt.Sprintf("%s_%s-%d", "podSpec", "container2", port), - Protocol: v1.ProtocolTCP, - Port: port, - }, - }, - Selector: map[string]string{ - "armada_job_id": apiJob.GetId(), "armada_pod_number": "0", "armada_queue_id": "queue", - }, - ClusterIP: clusterIP, - Type: serviceType, - }, - } - } - - expectedIngressAnnotations := util.MergeMaps( - expectedServiceAnnotations, - ingressConfig.Annotations, - ) - - expectedIngressRules := make([]networking.IngressRule, 2) - pathType := networking.PathTypePrefix - for i, container := range apiJob.PodSpec.Containers { - expectedIngressRules[i] = networking.IngressRule{ - Host: fmt.Sprintf("%s-%d-armada-%s-0.%s.%s", - container.Name, 5000, apiJob.GetId(), apiJob.GetNamespace(), ingressConfig.HostnameSuffix), - IngressRuleValue: networking.IngressRuleValue{ - HTTP: &networking.HTTPIngressRuleValue{ - Paths: []networking.HTTPIngressPath{ - { - Path: "/", - PathType: &pathType, - Backend: networking.IngressBackend{ - Service: &networking.IngressServiceBackend{ - Name: fmt.Sprintf("armada-%s-0-ingress", apiJob.GetId()), - Port: networking.ServiceBackendPort{ - Number: 5000, - }, - }, - }, - }, - }, - }, - }, - } - } - - expectedIngress := &networking.Ingress{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("armada-%s-%d-ingress-%d", apiJob.GetId(), 0, 0), - Namespace: apiJob.GetNamespace(), - Labels: expectedLabels, - Annotations: expectedIngressAnnotations, - }, - Spec: networking.IngressSpec{ - IngressClassName: nil, - DefaultBackend: nil, - TLS: []networking.IngressTLS{ - { - Hosts: []string{ - fmt.Sprintf("podSpec_container1-5000-armada-%s-0.%s.%s", apiJob.GetId(), apiJob.GetNamespace(), ingressConfig.HostnameSuffix), - fmt.Sprintf("podSpec_container2-5000-armada-%s-0.%s.%s", apiJob.GetId(), apiJob.GetNamespace(), ingressConfig.HostnameSuffix), - }, - SecretName: fmt.Sprintf("%s-%s", apiJob.Namespace, ingressConfig.CertNameSuffix), - }, - }, - Rules: expectedIngressRules, - }, - Status: networking.IngressStatus{}, - } - - for _, service := range services { - expected, ok := expectedServices[service.Name] - if !assert.Truef(t, ok, "got unexpected service name %s", service.Name) { - t.FailNow() - } - assert.Equal(t, expected, service) - } - - if !assert.Equal(t, expectedIngress, ingresses[0]) { - t.FailNow() - } -} - -func TestConvertJobSinglePodSpec(t *testing.T) { - expected := testJob(false) - - ingressConfig := &configuration.IngressConfiguration{ - HostnameSuffix: "HostnameSuffix", - CertNameSuffix: "CertNameSuffix", - Annotations: map[string]string{"ingress_annotation_1": "ingress_annotation_1", "ingress_annotation_2": "ingress_annotation_2"}, - } - err := PopulateK8sServicesIngresses(expected, ingressConfig) - if ok := assert.NoError(t, err); !ok { - t.FailNow() - } - - // After converting to K8s objects, we don't need the API-specific objects. 
- expected.Services = nil - expected.Ingress = nil - - logJob, err := LogSubmitJobFromApiJob(expected) - if ok := assert.NoError(t, err); !ok { - t.FailNow() - } - - actual, err := ApiJobFromLogSubmitJob( - expected.Owner, - expected.QueueOwnershipUserGroups, - expected.Queue, - expected.JobSetId, - expected.Created, - logJob, - ) - if ok := assert.NoError(t, err); !ok { - t.FailNow() - } - - assert.Equal(t, expected, actual) -} - -func TestConvertJobMultiplePodSpecs(t *testing.T) { - expected := testJob(true) - - ingressConfig := &configuration.IngressConfiguration{ - HostnameSuffix: "HostnameSuffix", - CertNameSuffix: "CertNameSuffix", - Annotations: map[string]string{"ingress_annotation_1": "ingress_annotation_1", "ingress_annotation_2": "ingress_annotation_2"}, - } - err := PopulateK8sServicesIngresses(expected, ingressConfig) - if ok := assert.NoError(t, err); !ok { - t.FailNow() - } - - // After converting to K8s objects, we don't need the API-specific objects. - expected.Services = nil - expected.Ingress = nil - - logJob, err := LogSubmitJobFromApiJob(expected) - if ok := assert.NoError(t, err); !ok { - t.FailNow() - } - - actual, err := ApiJobFromLogSubmitJob( - expected.Owner, - expected.QueueOwnershipUserGroups, - expected.Queue, - expected.JobSetId, - expected.Created, - logJob, - ) - if ok := assert.NoError(t, err); !ok { - t.FailNow() - } - - assert.Equal(t, expected, actual) -} - -func testJob(multiplePodSpecs bool) *api.Job { - var mainPodSpec *v1.PodSpec - var podSpec *v1.PodSpec - var podSpecs []*v1.PodSpec - if multiplePodSpecs { - podSpecs = []*v1.PodSpec{testPodSpec("podSpec1"), testPodSpec("podSpec2")} - mainPodSpec = podSpecs[0] - } else { - podSpec = testPodSpec("podSpec") - mainPodSpec = podSpec - } - return &api.Job{ - Id: util.NewULID(), - ClientId: "clientId", - JobSetId: "jobSet", - Queue: "queue", - Namespace: "namespace", - Labels: map[string]string{"label_1": "label_1", "label_2": "label_2"}, - Annotations: map[string]string{"annotation_1": "annotation_1", "annotation_2": "annotation_2"}, - // Deprecated and hence not part of the tests. 
- // RequiredNodeLabels: map[string]string{}, - Owner: "owner", - QueueOwnershipUserGroups: []string{"group1, group2"}, - Priority: 1, - PodSpec: podSpec, - PodSpecs: podSpecs, - SchedulingResourceRequirements: api.SchedulingResourceRequirementsFromPodSpec(mainPodSpec), - Created: time.Now(), - Ingress: []*api.IngressConfig{ - { - Type: api.IngressType_Ingress, - Ports: []uint32{5000}, - TlsEnabled: true, - }, - }, - Services: []*api.ServiceConfig{ - { - Type: api.ServiceType_NodePort, - Ports: []uint32{6000}, - }, - { - Type: api.ServiceType_Headless, - Ports: []uint32{7000}, - }, - }, - K8SIngress: nil, - K8SService: nil, - } -} - -func testPodSpec(name string) *v1.PodSpec { - return &v1.PodSpec{ - Containers: []v1.Container{testContainer(name + "_" + "container1"), testContainer(name + "_" + "container2")}, - NodeSelector: map[string]string{"nodeselector": "nodeselector_value"}, - Tolerations: []v1.Toleration{ - { - Key: "example.com/default_toleration_1", - Value: "value_1", - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, - }, - { - Key: "example.com/default_toleration_2", - Value: "value_2", - Operator: v1.TolerationOpEqual, - Effect: v1.TaintEffectNoSchedule, - }, - }, - } -} - -func testContainer(name string) v1.Container { - cpu, _ := resource.ParseQuantity("80m") - memory, _ := resource.ParseQuantity("50Mi") - return v1.Container{ - Name: name, - Image: "alpine:3.18.3", - Command: []string{"cmd1", "cmd2"}, - Args: []string{"sleep", "5s"}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, - Limits: v1.ResourceList{"cpu": cpu, "memory": memory}, - }, - Ports: []v1.ContainerPort{ - { - ContainerPort: 5000, - Protocol: v1.ProtocolTCP, - Name: "port5000", - }, - { - ContainerPort: 6000, - Protocol: v1.ProtocolTCP, - Name: "port6000", - }, - { - ContainerPort: 7000, - Protocol: v1.ProtocolTCP, - Name: "port7000", - }, - }, - } -} - func TestCompactSequences_Basic(t *testing.T) { sequences := []*armadaevents.EventSequence{ { @@ -622,6 +257,35 @@ func TestCompactSequences_Groups(t *testing.T) { assert.Equal(t, expected, actual) } +func TestSequenceEventListSizeBytes(t *testing.T) { + jobId, err := armadaevents.ProtoUuidFromUlidString(util.ULID().String()) + if !assert.NoError(t, err) { + return + } + + sequence := &armadaevents.EventSequence{ + Queue: "", + UserId: "", + JobSetName: "", + Groups: []string{}, + Events: []*armadaevents.EventSequence_Event{ + { + Event: &armadaevents.EventSequence_Event_CancelledJob{ + CancelledJob: &armadaevents.CancelledJob{ + JobId: jobId, + }, + }, + }, + }, + } + + sequenceSizeBytes := uint(proto.Size(sequence)) + // If this fails, it means that the sequenceEventListOverheadSizeBytes constant is possibly too small + // We are showing our safe estimate of the byte overhead added by the event list in proto is definitely large enough + // by showing it is larger than a sequence with a single event (as that sequence contains the overhead added by the event list) + assert.True(t, sequenceSizeBytes < sequenceEventListOverheadSizeBytes) +} + func TestLimitSequenceByteSize(t *testing.T) { sequence := &armadaevents.EventSequence{ Queue: "queue1", @@ -655,7 +319,6 @@ func TestLimitSequenceByteSize(t *testing.T) { _, err = LimitSequenceByteSize(sequence, 1, false) assert.NoError(t, err) - assert.Equal(t, []*armadaevents.EventSequence{sequence}, actual) expected := make([]*armadaevents.EventSequence, numEvents) for i := 0; i < numEvents; i++ { @@ -675,7 +338,7 @@ func TestLimitSequenceByteSize(t 
*testing.T) { }, } } - actual, err = LimitSequenceByteSize(sequence, 65, true) + actual, err = LimitSequenceByteSize(sequence, 65+sequenceEventListOverheadSizeBytes, true) if !assert.NoError(t, err) { return } @@ -710,7 +373,7 @@ func TestLimitSequencesByteSize(t *testing.T) { sequences = append(sequences, sequence) } - actual, err := LimitSequencesByteSize(sequences, 65, true) + actual, err := LimitSequencesByteSize(sequences, 65+sequenceEventListOverheadSizeBytes, true) if !assert.NoError(t, err) { return } diff --git a/internal/common/grpc/gateway.go b/internal/common/grpc/gateway.go index b8e87e3e431..b45773b5a71 100644 --- a/internal/common/grpc/gateway.go +++ b/internal/common/grpc/gateway.go @@ -11,12 +11,12 @@ import ( "github.com/go-openapi/runtime/middleware" "github.com/grpc-ecosystem/grpc-gateway/runtime" log "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" protoutil "github.com/armadaproject/armada/internal/common/grpc/protoutils" - "github.com/armadaproject/armada/internal/common/util" ) // CreateGatewayHandler configures the gRPC API gateway @@ -83,7 +83,7 @@ func logRestRequests(h http.Handler) http.Handler { func allowCORS(h http.Handler, corsAllowedOrigins []string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if origin := r.Header.Get("Origin"); origin != "" && util.ContainsString(corsAllowedOrigins, origin) { + if origin := r.Header.Get("Origin"); origin != "" && slices.Contains(corsAllowedOrigins, origin) { w.Header().Set("Access-Control-Allow-Origin", origin) w.Header().Set("Access-Control-Allow-Credentials", "true") if r.Method == "OPTIONS" && r.Header.Get("Access-Control-Request-Method") != "" { diff --git a/internal/common/grpc/grpc.go b/internal/common/grpc/grpc.go index 43707dffadf..045c448fc15 100644 --- a/internal/common/grpc/grpc.go +++ b/internal/common/grpc/grpc.go @@ -24,7 +24,7 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" "github.com/armadaproject/armada/internal/common/certs" "github.com/armadaproject/armada/internal/common/grpc/configuration" "github.com/armadaproject/armada/internal/common/requestid" @@ -35,8 +35,9 @@ import ( func CreateGrpcServer( keepaliveParams keepalive.ServerParameters, keepaliveEnforcementPolicy keepalive.EnforcementPolicy, - authServices []authorization.AuthService, + authServices []auth.AuthService, tlsConfig configuration.TlsConfig, + logrusOptions ...grpc_logrus.Option, ) *grpc.Server { // Logging, authentication, etc. are implemented via gRPC interceptors // (i.e., via functions that are called before handling the actual request). 
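CreateGrpcServer now accepts optional grpc_logrus options and forwards them to both the unary and stream logging interceptors (see the hunk below). A minimal caller sketch, assuming go-grpc-middleware's logrus package and Armada's own grpc, auth and configuration packages; the error-only decider is illustrative and not part of this change:

```go
package example

import (
	grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"

	"github.com/armadaproject/armada/internal/common/auth"
	grpccommon "github.com/armadaproject/armada/internal/common/grpc"
	grpcconfig "github.com/armadaproject/armada/internal/common/grpc/configuration"
)

// newServer passes a logging option through CreateGrpcServer's new variadic
// parameter; here it asks grpc_logrus to log only RPCs that returned an error.
func newServer(
	params keepalive.ServerParameters,
	policy keepalive.EnforcementPolicy,
	authServices []auth.AuthService,
	tls grpcconfig.TlsConfig,
) *grpc.Server {
	return grpccommon.CreateGrpcServer(params, policy, authServices, tls,
		grpc_logrus.WithDecider(func(fullMethod string, err error) bool {
			return err != nil // skip log lines for successful calls
		}),
	)
}
```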
@@ -60,19 +61,19 @@ func CreateGrpcServer( grpc_ctxtags.UnaryServerInterceptor(tagsExtractor), requestid.UnaryServerInterceptor(false), armadaerrors.UnaryServerInterceptor(2000), - grpc_logrus.UnaryServerInterceptor(messageDefault), + grpc_logrus.UnaryServerInterceptor(messageDefault, logrusOptions...), ) streamInterceptors = append(streamInterceptors, grpc_ctxtags.StreamServerInterceptor(tagsExtractor), requestid.StreamServerInterceptor(false), armadaerrors.StreamServerInterceptor(2000), - grpc_logrus.StreamServerInterceptor(messageDefault), + grpc_logrus.StreamServerInterceptor(messageDefault, logrusOptions...), ) // Authentication // The provided authServices represents a list of services that can be used to authenticate // the client (e.g., username/password and OpenId). authFunction is a combination of these. - authFunction := authorization.CreateMiddlewareAuthFunction(authServices) + authFunction := auth.CreateMiddlewareAuthFunction(authServices) unaryInterceptors = append(unaryInterceptors, grpc_auth.UnaryServerInterceptor(authFunction)) streamInterceptors = append(streamInterceptors, grpc_auth.StreamServerInterceptor(authFunction)) diff --git a/internal/common/ingest/batch.go b/internal/common/ingest/batch.go index 5607714e235..52284e41b90 100644 --- a/internal/common/ingest/batch.go +++ b/internal/common/ingest/batch.go @@ -5,7 +5,7 @@ import ( "time" log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" ) diff --git a/internal/common/ingest/batch_test.go b/internal/common/ingest/batch_test.go index a906dbc8258..3160303dd54 100644 --- a/internal/common/ingest/batch_test.go +++ b/internal/common/ingest/batch_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/armadaproject/armada/internal/common/armadacontext" ) diff --git a/internal/common/ingest/testfixtures/event.go b/internal/common/ingest/testfixtures/event.go index b47ca3a26f8..018dc3a2565 100644 --- a/internal/common/ingest/testfixtures/event.go +++ b/internal/common/ingest/testfixtures/event.go @@ -9,7 +9,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" @@ -72,6 +71,7 @@ const ( PodNumber = 6 ExitCode = 322 ErrMsg = "sample error message" + DebugMsg = "sample debug message" LeaseReturnedMsg = "lease returned error message" TerminatedMsg = "test pod terminated message" UnschedulableMsg = "test pod is unschedulable" @@ -259,6 +259,16 @@ var JobRunSucceeded = &armadaevents.EventSequence_Event{ }, } +var JobRunCancelled = &armadaevents.EventSequence_Event{ + Created: &testfixtures.BaseTime, + Event: &armadaevents.EventSequence_Event_JobRunCancelled{ + JobRunCancelled: &armadaevents.JobRunCancelled{ + RunId: RunIdProto, + JobId: JobIdProto, + }, + }, +} + var LeaseReturned = &armadaevents.EventSequence_Event{ Created: &testfixtures.BaseTime, Event: &armadaevents.EventSequence_Event_JobRunErrors{ @@ -305,6 +315,16 @@ var JobCancelled = &armadaevents.EventSequence_Event{ }, } +var JobValidated = &armadaevents.EventSequence_Event{ + Created: &testfixtures.BaseTime, + 
Event: &armadaevents.EventSequence_Event_JobValidated{ + JobValidated: &armadaevents.JobValidated{ + JobId: JobIdProto, + Pools: []string{"cpu"}, + }, + }, +} + var JobRequeued = &armadaevents.EventSequence_Event{ Created: &BaseTime, Event: &armadaevents.EventSequence_Event_JobRequeued{ @@ -393,7 +413,7 @@ var JobPreemptionRequested = &armadaevents.EventSequence_Event{ }, } -var JobPreempted = &armadaevents.EventSequence_Event{ +var JobRunPreempted = &armadaevents.EventSequence_Event{ Created: &testfixtures.BaseTime, Event: &armadaevents.EventSequence_Event_JobRunPreempted{ JobRunPreempted: &armadaevents.JobRunPreempted{ @@ -414,8 +434,9 @@ var JobRunFailed = &armadaevents.EventSequence_Event{ Terminal: true, Reason: &armadaevents.Error_PodError{ PodError: &armadaevents.PodError{ - Message: ErrMsg, - NodeName: NodeName, + Message: ErrMsg, + DebugMessage: DebugMsg, + NodeName: NodeName, ContainerErrors: []*armadaevents.ContainerError{ {ExitCode: ExitCode}, }, @@ -475,6 +496,23 @@ var JobRunUnschedulable = &armadaevents.EventSequence_Event{ }, } +var JobPreempted = &armadaevents.EventSequence_Event{ + Created: &testfixtures.BaseTime, + Event: &armadaevents.EventSequence_Event_JobErrors{ + JobErrors: &armadaevents.JobErrors{ + JobId: JobIdProto, + Errors: []*armadaevents.Error{ + { + Terminal: true, + Reason: &armadaevents.Error_JobRunPreemptedError{ + JobRunPreemptedError: &armadaevents.JobRunPreemptedError{}, + }, + }, + }, + }, + }, +} + var JobFailed = &armadaevents.EventSequence_Event{ Created: &testfixtures.BaseTime, Event: &armadaevents.EventSequence_Event_JobErrors{ @@ -503,7 +541,7 @@ var JobLeaseReturned = &armadaevents.EventSequence_Event{ Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ JobId: JobIdProto, - RunId: eventutil.LegacyJobRunId(), + RunId: RunIdProto, Errors: []*armadaevents.Error{ { Terminal: true, @@ -512,7 +550,8 @@ var JobLeaseReturned = &armadaevents.EventSequence_Event{ ObjectMeta: &armadaevents.ObjectMeta{ ExecutorId: ExecutorId, }, - Message: LeaseReturnedMsg, + Message: LeaseReturnedMsg, + DebugMessage: DebugMsg, }, }, }, diff --git a/internal/common/metrics/scheduler_metrics.go b/internal/common/metrics/scheduler_metrics.go index 3f33bfff44c..64bf0ab5ff1 100644 --- a/internal/common/metrics/scheduler_metrics.go +++ b/internal/common/metrics/scheduler_metrics.go @@ -15,6 +15,13 @@ var QueueSizeDesc = prometheus.NewDesc( nil, ) +var QueueDistinctSchedulingKeysDesc = prometheus.NewDesc( + MetricPrefix+"queue_distinct_scheduling_keys", + "Number of distinct scheduling keys requested by a queue", + []string{"queueName"}, + nil, +) + var QueuePriorityDesc = prometheus.NewDesc( MetricPrefix+"queue_priority", "Priority of a queue", @@ -202,10 +209,11 @@ func Describe(out chan<- *prometheus.Desc) { } } -func CollectQueueMetrics(queueCounts map[string]int, metricsProvider QueueMetricProvider) []prometheus.Metric { +func CollectQueueMetrics(queueCounts map[string]int, queueDistinctSchedulingKeyCounts map[string]int, metricsProvider QueueMetricProvider) []prometheus.Metric { metrics := make([]prometheus.Metric, 0, len(AllDescs)) for q, count := range queueCounts { metrics = append(metrics, NewQueueSizeMetric(count, q)) + metrics = append(metrics, NewQueueDistinctSchedulingKeyMetric(queueDistinctSchedulingKeyCounts[q], q)) queuedJobMetrics := metricsProvider.GetQueuedJobMetrics(q) runningJobMetrics := metricsProvider.GetRunningJobMetrics(q) for _, m := range queuedJobMetrics { @@ -263,6 +271,10 @@ func 
NewQueueSizeMetric(value int, queue string) prometheus.Metric { return prometheus.MustNewConstMetric(QueueSizeDesc, prometheus.GaugeValue, float64(value), queue) } +func NewQueueDistinctSchedulingKeyMetric(value int, queue string) prometheus.Metric { + return prometheus.MustNewConstMetric(QueueDistinctSchedulingKeysDesc, prometheus.GaugeValue, float64(value), queue) +} + func NewQueueDuration(count uint64, sum float64, buckets map[float64]uint64, pool string, priorityClass string, queue string) prometheus.Metric { return prometheus.MustNewConstHistogram(QueueDurationDesc, count, sum, buckets, pool, priorityClass, queue) } diff --git a/internal/common/pgkeyvalue/db_testutil.go b/internal/common/pgkeyvalue/db_testutil.go deleted file mode 100644 index 37775121adb..00000000000 --- a/internal/common/pgkeyvalue/db_testutil.go +++ /dev/null @@ -1,64 +0,0 @@ -package pgkeyvalue - -import ( - "database/sql" - "fmt" - - "github.com/jackc/pgx/v5/pgxpool" - _ "github.com/jackc/pgx/v5/stdlib" - "github.com/pkg/errors" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" -) - -func withDatabasePgx(action func(db *pgxpool.Pool) error) error { - ctx := armadacontext.Background() - - // Connect and create a dedicated database for the test - // For now use database/sql for this - dbName := "test_" + util.NewULID() - connectionString := "host=localhost port=5432 user=postgres password=psw sslmode=disable" - db, err := sql.Open("pgx", connectionString) - if err != nil { - return errors.WithStack(err) - } - defer db.Close() - - _, err = db.Exec("CREATE DATABASE " + dbName) - if err != nil { - return errors.WithStack(err) - } - - // Connect again- this time to the database we just created and using pgx pool. This will be used for tests - testDbPool, err := pgxpool.New(ctx, connectionString+" dbname="+dbName) - if err != nil { - return errors.WithStack(err) - } - - defer func() { - testDbPool.Close() - - // disconnect all db user before cleanup - _, err = db.Exec( - `SELECT pg_terminate_backend(pg_stat_activity.pid) - FROM pg_stat_activity WHERE pg_stat_activity.datname = '` + dbName + `';`) - if err != nil { - fmt.Println("Failed to disconnect users") - } - - _, err = db.Exec("DROP DATABASE " + dbName) - if err != nil { - fmt.Println("Failed to drop database") - } - }() - - // A third connection! 
We can get rid of this once we use move udateDatabse over to pgx - legacyDb, err := sql.Open("pgx", connectionString+" dbname="+dbName) - if err != nil { - return errors.WithStack(err) - } - defer legacyDb.Close() - - return action(testDbPool) -} diff --git a/internal/common/pgkeyvalue/pgkeyvalue.go b/internal/common/pgkeyvalue/pgkeyvalue.go deleted file mode 100644 index 91b820ecd7e..00000000000 --- a/internal/common/pgkeyvalue/pgkeyvalue.go +++ /dev/null @@ -1,139 +0,0 @@ -package pgkeyvalue - -import ( - "fmt" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/clock" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/database" - "github.com/armadaproject/armada/internal/common/logging" -) - -type KeyValue struct { - Key string `db:"key"` - Value []byte `db:"value"` - Inserted time.Time `db:"inserted"` -} - -type KeyValueStore interface { - Store(ctx *armadacontext.Context, kvs map[string][]byte) error - Load(ctx *armadacontext.Context, keys []string) (map[string][]byte, error) -} - -// PGKeyValueStore is a time-limited key-value store backed by postgres with a local LRU cache. -// The store is write-only, i.e., writing to an existing key will return an error (of type *armadaerrors.ErrAlreadyExists). -// Keys can only be deleted by running the cleanup function. -// Deleting keys does not cause caches to update, i.e., nodes may have an inconsistent view if keys are deleted. -type PGKeyValueStore struct { - // Postgres connection. - db *pgxpool.Pool - // Name of the postgres table used for storage. - tableName string - // Used to set inserted time - clock clock.Clock -} - -func New(ctx *armadacontext.Context, db *pgxpool.Pool, tableName string) (*PGKeyValueStore, error) { - if db == nil { - return nil, errors.WithStack(&armadaerrors.ErrInvalidArgument{ - Name: "db", - Value: db, - Message: "db must be non-nil", - }) - } - if tableName == "" { - return nil, errors.WithStack(&armadaerrors.ErrInvalidArgument{ - Name: "TableName", - Value: tableName, - Message: "TableName must be non-empty", - }) - } - err := createTableIfNotExists(ctx, db, tableName) - if err != nil { - return nil, errors.WithStack(err) - } - return &PGKeyValueStore{ - db: db, - tableName: tableName, - clock: clock.RealClock{}, - }, nil -} - -func (c *PGKeyValueStore) Load(ctx *armadacontext.Context, keys []string) (map[string][]byte, error) { - rows, err := c.db.Query(ctx, fmt.Sprintf("SELECT KEY, VALUE FROM %s WHERE KEY = any($1)", c.tableName), keys) - if err != nil { - return nil, errors.WithStack(err) - } - kv := make(map[string][]byte, len(keys)) - for rows.Next() { - key := "" - var value []byte = nil - err := rows.Scan(&key, &value) - if err != nil { - return nil, errors.WithStack(err) - } - kv[key] = value - } - return kv, nil -} - -func (c *PGKeyValueStore) Store(ctx *armadacontext.Context, kvs map[string][]byte) error { - data := make([]KeyValue, 0, len(kvs)) - for k, v := range kvs { - data = append(data, KeyValue{ - Key: k, - Value: v, - Inserted: c.clock.Now(), - }) - } - return database.UpsertWithTransaction(ctx, c.db, c.tableName, data) -} - -func createTableIfNotExists(ctx *armadacontext.Context, db *pgxpool.Pool, tableName string) error { - _, err := db.Exec(ctx, fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - key TEXT PRIMARY KEY, - value BYTEA, - inserted TIMESTAMP not null - );`, 
tableName)) - return err -} - -// Cleanup removes all key-value pairs older than lifespan. -func (c *PGKeyValueStore) cleanup(ctx *armadacontext.Context, lifespan time.Duration) error { - sql := fmt.Sprintf("DELETE FROM %s WHERE (inserted <= $1);", c.tableName) - _, err := c.db.Exec(ctx, sql, c.clock.Now().Add(-lifespan)) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// PeriodicCleanup starts a goroutine that automatically runs the cleanup job -// every interval until the provided context is cancelled. -func (c *PGKeyValueStore) PeriodicCleanup(ctx *armadacontext.Context, interval time.Duration, lifespan time.Duration) error { - log := logrus.StandardLogger().WithField("service", "PGKeyValueStoreCleanup") - log.Info("service started") - ticker := c.clock.NewTicker(interval) - for { - select { - case <-ctx.Done(): - ticker.Stop() - return nil - case <-ticker.C(): - start := time.Now() - err := c.cleanup(ctx, lifespan) - if err != nil { - logging.WithStacktrace(log, err).WithField("delay", time.Since(start)).Warn("cleanup failed") - } else { - log.WithField("delay", c.clock.Since(start)).Info("cleanup succeeded") - } - } - } -} diff --git a/internal/common/pgkeyvalue/pgkeyvalue_test.go b/internal/common/pgkeyvalue/pgkeyvalue_test.go deleted file mode 100644 index 3a913f0b6f6..00000000000 --- a/internal/common/pgkeyvalue/pgkeyvalue_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package pgkeyvalue - -import ( - "testing" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/exp/maps" - "k8s.io/apimachinery/pkg/util/clock" - - "github.com/armadaproject/armada/internal/common/armadacontext" -) - -func TestLoadStore(t *testing.T) { - ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) - defer cancel() - err := withDatabasePgx(func(db *pgxpool.Pool) error { - kvStore, err := New(ctx, db, "cachetable") - require.NoError(t, err) - - data1 := map[string][]byte{ - "a": {0x1}, "b": {0x2}, "c": {0x3}, - } - err = kvStore.Store(ctx, data1) - require.NoError(t, err) - - loaded, err := kvStore.Load(ctx, maps.Keys(data1)) - require.NoError(t, err) - assert.Equal(t, data1, loaded) - - data2 := map[string][]byte{"c": {0x4}, "d": {0x5}} - err = kvStore.Store(ctx, data2) - require.NoError(t, err) - - loaded, err = kvStore.Load(ctx, []string{"a", "b", "c", "d"}) - require.NoError(t, err) - assert.Equal(t, map[string][]byte{ - "a": {0x1}, "b": {0x2}, "c": {0x4}, "d": {0x5}, - }, loaded) - - return nil - }) - require.NoError(t, err) -} - -func TestCleanup(t *testing.T) { - ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 10*time.Second) - defer cancel() - err := withDatabasePgx(func(db *pgxpool.Pool) error { - baseTime := time.Now() - testClock := clock.NewFakeClock(baseTime) - kvStore, err := New(ctx, db, "cachetable") - kvStore.clock = testClock - require.NoError(t, err) - - // Data that will be cleaned up - data1 := map[string][]byte{"a": {0x1}, "b": {0x2}} - err = kvStore.Store(ctx, data1) - require.NoError(t, err) - - // advance the clock - testClock.SetTime(testClock.Now().Add(5 * time.Second)) - - // Data that won't be cleaned up - data2 := map[string][]byte{"c": {0x3}} - err = kvStore.Store(ctx, data2) - require.NoError(t, err) - - loaded, err := kvStore.Load(ctx, []string{"a", "b", "c"}) - require.NoError(t, err) - assert.Equal(t, map[string][]byte{ - "a": {0x1}, "b": {0x2}, "c": {0x3}, - }, loaded) - - // Run the cleanup. 
- err = kvStore.cleanup(ctx, 5*time.Second) - require.NoError(t, err) - - loaded, err = kvStore.Load(ctx, []string{"a", "b", "c"}) - require.NoError(t, err) - assert.Equal(t, map[string][]byte{"c": {0x3}}, loaded) - return nil - }) - require.NoError(t, err) -} diff --git a/internal/common/pointer/pointer.go b/internal/common/pointer/pointer.go index f297dd04f3c..51b6596bbe2 100644 --- a/internal/common/pointer/pointer.go +++ b/internal/common/pointer/pointer.go @@ -2,10 +2,6 @@ package pointer import "time" -func Pointer[T any](v T) *T { - return &v -} - // Now returns a pointer to the current time func Now() *time.Time { return Time(time.Now()) diff --git a/internal/common/pulsarutils/eventsequence.go b/internal/common/pulsarutils/eventsequence.go index 3750a1b11e8..981aa824e09 100644 --- a/internal/common/pulsarutils/eventsequence.go +++ b/internal/common/pulsarutils/eventsequence.go @@ -11,13 +11,12 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/requestid" - "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) // CompactAndPublishSequences reduces the number of sequences to the smallest possible, // while respecting per-job set ordering and max Pulsar message size, and then publishes to Pulsar. -func CompactAndPublishSequences(ctx *armadacontext.Context, sequences []*armadaevents.EventSequence, producer pulsar.Producer, maxMessageSizeInBytes uint, scheduler schedulers.Scheduler) error { +func CompactAndPublishSequences(ctx *armadacontext.Context, sequences []*armadaevents.EventSequence, producer pulsar.Producer, maxMessageSizeInBytes uint) error { // Reduce the number of sequences to send to the minimum possible, // and then break up any sequences larger than maxMessageSizeInBytes. sequences = eventutil.CompactEventSequences(sequences) @@ -25,7 +24,7 @@ func CompactAndPublishSequences(ctx *armadacontext.Context, sequences []*armadae if err != nil { return err } - return PublishSequences(ctx, producer, sequences, scheduler) + return PublishSequences(ctx, producer, sequences) } // PublishSequences publishes several event sequences to Pulsar. @@ -37,7 +36,7 @@ func CompactAndPublishSequences(ctx *armadacontext.Context, sequences []*armadae // and // eventutil.LimitSequencesByteSize(sequences, int(srv.MaxAllowedMessageSize)) // before passing to this function. -func PublishSequences(ctx *armadacontext.Context, producer pulsar.Producer, sequences []*armadaevents.EventSequence, scheduler schedulers.Scheduler) error { +func PublishSequences(ctx *armadacontext.Context, producer pulsar.Producer, sequences []*armadaevents.EventSequence) error { // Incoming gRPC requests are annotated with a unique id. // Pass this id through the log by adding it to the Pulsar message properties. 
requestId := requestid.FromContextOrMissing(ctx) @@ -69,8 +68,7 @@ func PublishSequences(ctx *armadacontext.Context, producer pulsar.Producer, sequ &pulsar.ProducerMessage{ Payload: payloads[i], Properties: map[string]string{ - requestid.MetadataKey: requestId, - schedulers.PropertyName: schedulers.MsgPropertyFromScheduler(scheduler), + requestid.MetadataKey: requestId, }, Key: sequences[i].JobSetName, }, diff --git a/internal/common/pulsarutils/eventsequence_test.go b/internal/common/pulsarutils/eventsequence_test.go index 0832195beac..4801aa60534 100644 --- a/internal/common/pulsarutils/eventsequence_test.go +++ b/internal/common/pulsarutils/eventsequence_test.go @@ -10,19 +10,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) func TestPublishSequences_SendAsyncErr(t *testing.T) { producer := &mockProducer{} - err := PublishSequences(armadacontext.Background(), producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) + err := PublishSequences(armadacontext.Background(), producer, []*armadaevents.EventSequence{{}}) assert.NoError(t, err) producer = &mockProducer{ sendAsyncErr: errors.New("sendAsyncErr"), } - err = PublishSequences(armadacontext.Background(), producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) + err = PublishSequences(armadacontext.Background(), producer, []*armadaevents.EventSequence{{}}) assert.ErrorIs(t, err, producer.sendAsyncErr) } @@ -32,7 +31,7 @@ func TestPublishSequences_RespectTimeout(t *testing.T) { } ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Millisecond) defer cancel() - err := PublishSequences(ctx, producer, []*armadaevents.EventSequence{{}}, schedulers.Pulsar) + err := PublishSequences(ctx, producer, []*armadaevents.EventSequence{{}}) assert.ErrorIs(t, err, context.DeadlineExceeded) } diff --git a/internal/common/pulsarutils/publisher.go b/internal/common/pulsarutils/publisher.go index d3503621f13..19213cce335 100644 --- a/internal/common/pulsarutils/publisher.go +++ b/internal/common/pulsarutils/publisher.go @@ -5,7 +5,6 @@ import ( "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -46,8 +45,7 @@ func (p *PulsarPublisher) PublishMessages(ctx *armadacontext.Context, es *armada ctx, []*armadaevents.EventSequence{es}, p.producer, - p.maxAllowedMessageSize, - schedulers.Pulsar) + p.maxAllowedMessageSize) } func (p *PulsarPublisher) Close() { diff --git a/internal/common/requestid/interceptors.go b/internal/common/requestid/interceptors.go index 0dd77947786..b5ee03cd058 100644 --- a/internal/common/requestid/interceptors.go +++ b/internal/common/requestid/interceptors.go @@ -9,7 +9,7 @@ import ( "google.golang.org/grpc/metadata" ) -// Request IDs are embedded in HTTP headers using this key. +// MetadataKey is the HTTP header key using this key we use to store request ids. // This is the standard key used for request Ids. For example, opentelemetry uses the same one. 
const MetadataKey = "x-request-id" diff --git a/internal/common/schedulers/scheduler.go b/internal/common/schedulers/scheduler.go deleted file mode 100644 index 51439de289d..00000000000 --- a/internal/common/schedulers/scheduler.go +++ /dev/null @@ -1,34 +0,0 @@ -package schedulers - -import ( - log "github.com/sirupsen/logrus" -) - -type Scheduler int - -const ( - Legacy Scheduler = iota - Pulsar - All -) - -const ( - PropertyName string = "schedulerName" - PulsarSchedulerAttribute string = "pulsar" - LegacySchedulerAttribute string = "legacy" - AllSchedulersAttribute string = "all" -) - -// MsgPropertyFromScheduler returns the pulsar message property associated with the scheduler -func MsgPropertyFromScheduler(s Scheduler) string { - switch s { - case Pulsar: - return PulsarSchedulerAttribute - case Legacy: - return LegacySchedulerAttribute - case All: - return AllSchedulersAttribute - } - log.Warnf("Unknown scheduler [%d]. Defaulting to legacy scheduler", s) - return LegacySchedulerAttribute -} diff --git a/internal/common/task/background_task.go b/internal/common/task/background_task.go index 35ace370193..392e4898c8e 100644 --- a/internal/common/task/background_task.go +++ b/internal/common/task/background_task.go @@ -19,7 +19,7 @@ type task struct { } // BackgroundTaskManager is used for registering tasks (functions) to be run periodically. -// Prometehus log names for each task are prepended with metricsPrefix. +// Prometheus log names for each task are prepended with metricsPrefix. // BackgroundTaskManager is not threadsafe; it should only be accessed from a single thread. type BackgroundTaskManager struct { tasks []*task diff --git a/internal/common/util/batch.go b/internal/common/util/batch.go deleted file mode 100644 index 2db193b0f4a..00000000000 --- a/internal/common/util/batch.go +++ /dev/null @@ -1,26 +0,0 @@ -package util - -import "math" - -func Batch[T any](elements []T, batchSize int) [][]T { - total := len(elements) - - totalFullBatches := int(math.Floor(float64(total) / float64(batchSize))) - lastBatchSize := total % batchSize - totalBatches := totalFullBatches - if lastBatchSize != 0 { - totalBatches++ - } - - batches := make([][]T, totalBatches) - - for i := 0; i < totalFullBatches; i++ { - batches[i] = elements[i*batchSize : (i+1)*batchSize] - } - - if lastBatchSize != 0 { - batches[totalFullBatches] = elements[totalFullBatches*batchSize : totalFullBatches*batchSize+lastBatchSize] - } - - return batches -} diff --git a/internal/common/util/batch_test.go b/internal/common/util/batch_test.go deleted file mode 100644 index 134d0df4937..00000000000 --- a/internal/common/util/batch_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package util - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBatch(t *testing.T) { - assert.Equal(t, [][]string{}, Batch([]string{}, 1)) - assert.Equal(t, [][]string{{"a"}}, Batch([]string{"a"}, 1)) - assert.Equal(t, [][]string{{"a"}}, Batch([]string{"a"}, 10)) - assert.Equal(t, [][]string{{"a"}, {"b"}}, Batch([]string{"a", "b"}, 1)) - assert.Equal(t, [][]string{{"a", "b"}}, Batch([]string{"a", "b"}, 2)) - assert.Equal(t, [][]string{{"a", "b"}}, Batch([]string{"a", "b"}, 3)) - assert.Equal(t, [][]string{{"a", "b", "c"}, {"d", "e"}}, Batch([]string{"a", "b", "c", "d", "e"}, 3)) - assert.Equal(t, [][]string{{"a", "b", "c"}, {"d", "e", "f"}}, Batch([]string{"a", "b", "c", "d", "e", "f"}, 3)) -} diff --git a/internal/common/util/clock.go b/internal/common/util/clock.go deleted file mode 100644 index 21620d8e839..00000000000 --- 
a/internal/common/util/clock.go +++ /dev/null @@ -1,23 +0,0 @@ -package util - -import "time" - -type Clock interface { - Now() time.Time -} - -type DefaultClock struct{} - -func (c *DefaultClock) Now() time.Time { return time.Now() } - -type UTCClock struct{} - -func (c *UTCClock) Now() time.Time { return time.Now().UTC() } - -type DummyClock struct { - T time.Time -} - -func (c *DummyClock) Now() time.Time { - return c.T -} diff --git a/internal/common/util/context.go b/internal/common/util/context.go deleted file mode 100644 index 1f6fa6519f4..00000000000 --- a/internal/common/util/context.go +++ /dev/null @@ -1,12 +0,0 @@ -package util - -import ( - "time" - - "github.com/armadaproject/armada/internal/common/armadacontext" -) - -func CloseToDeadline(ctx *armadacontext.Context, tolerance time.Duration) bool { - deadline, exists := ctx.Deadline() - return exists && deadline.Before(time.Now().Add(tolerance)) -} diff --git a/internal/common/util/job.go b/internal/common/util/job.go deleted file mode 100644 index c309285583e..00000000000 --- a/internal/common/util/job.go +++ /dev/null @@ -1,20 +0,0 @@ -package util - -import ( - v1 "k8s.io/api/core/v1" - - "github.com/armadaproject/armada/pkg/api" -) - -func PodSpecFromJob(job *api.Job) *v1.PodSpec { - // TODO: Remove - if job.PodSpec != nil { - return job.PodSpec - } - for _, podSpec := range job.PodSpecs { - if podSpec != nil { - return podSpec - } - } - return nil -} diff --git a/internal/common/util/list.go b/internal/common/util/list.go index 10416199d00..5247521443a 100644 --- a/internal/common/util/list.go +++ b/internal/common/util/list.go @@ -18,41 +18,3 @@ func StringListToSet(list []string) map[string]bool { } return set } - -func ContainsString(list []string, val string) bool { - for _, elem := range list { - if elem == val { - return true - } - } - return false -} - -func DeepCopyListUint32(list []uint32) []uint32 { - result := make([]uint32, 0, len(list)) - for _, v := range list { - result = append(result, v) - } - return result -} - -func Concat[T any](slices ...[]T) []T { - total := 0 - for _, s := range slices { - total += len(s) - } - result := make([]T, total) - var i int - for _, s := range slices { - i += copy(result[i:], s) - } - return result -} - -func Map[T any, U any](list []T, fn func(val T) U) []U { - out := make([]U, len(list)) - for i, val := range list { - out[i] = fn(val) - } - return out -} diff --git a/internal/common/util/list_test.go b/internal/common/util/list_test.go deleted file mode 100644 index 39066948898..00000000000 --- a/internal/common/util/list_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package util - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestConcat_Empty(t *testing.T) { - output := Concat([]int{}) - assert.Equal(t, []int{}, output) -} - -func TestConcat(t *testing.T) { - output := Concat( - []int{1, 2, 3}, - []int{4, 5, 6, 7}, - []int{8, 9, 10, 11, 12}, - ) - assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, output) -} - -func TestMap_Empty(t *testing.T) { - output := Map([]int{}, func(val int) int { return val }) - assert.Equal(t, []int{}, output) -} - -func TestMap(t *testing.T) { - output := Map([]int{2, 4, 6, 11}, func(val int) string { return fmt.Sprintf("%d", val) }) - assert.Equal(t, []string{"2", "4", "6", "11"}, output) -} diff --git a/internal/common/util/lookout.go b/internal/common/util/lookout.go deleted file mode 100644 index e791bab20c2..00000000000 --- a/internal/common/util/lookout.go +++ /dev/null @@ -1,15 +0,0 @@ -package 
util - -import "strings" - -const MaxMessageLength = 2048 - -func RemoveNullsFromString(s string) string { - return strings.ReplaceAll(s, "\000", "") -} - -func RemoveNullsFromJson(json []byte) []byte { - jsonString := string(json) - jsonString = strings.ReplaceAll(jsonString, "\\u0000", "") - return []byte(jsonString) -} diff --git a/internal/common/util/lookout_test.go b/internal/common/util/lookout_test.go deleted file mode 100644 index c90399db85e..00000000000 --- a/internal/common/util/lookout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package util - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRemoveNullsFromString(t *testing.T) { - assert.Equal(t, "", RemoveNullsFromString("")) - assert.Equal(t, "", RemoveNullsFromString("\000\000\000")) - assert.NotContains(t, RemoveNullsFromString("Hello \000 World"), "\000") -} - -func TestRemoveNullsFromJson(t *testing.T) { - someMap := map[string]string{ - "hello": "world", - "one": "\000 two", - } - jsonData, err := json.Marshal(someMap) - assert.NoError(t, err) - - s := RemoveNullsFromJson(jsonData) - assert.NotContains(t, string(s), "\\u000") -} diff --git a/internal/common/util/map.go b/internal/common/util/map.go index 661990b904a..9bb4677e645 100644 --- a/internal/common/util/map.go +++ b/internal/common/util/map.go @@ -1,15 +1,7 @@ package util -func GetOrDefault(m map[string]float64, key string, def float64) float64 { - v, ok := m[key] - if ok { - return v - } - return def -} - -func MergeMaps(a map[string]string, b map[string]string) map[string]string { - result := make(map[string]string) +func MergeMaps[K comparable, V any](a map[K]V, b map[K]V) map[K]V { + result := make(map[K]V) for k, v := range a { result[k] = v } @@ -19,19 +11,7 @@ func MergeMaps(a map[string]string, b map[string]string) map[string]string { return result } -func DeepCopy(a map[string]string) map[string]string { - if a == nil { - return nil - } - - result := make(map[string]string) - for k, v := range a { - result[k] = v - } - return result -} - -func Equal(a map[string]string, b map[string]string) bool { +func Equal[K comparable, V comparable](a map[K]V, b map[K]V) bool { if len(a) != len(b) { return false } @@ -44,12 +24,12 @@ func Equal(a map[string]string, b map[string]string) bool { return true } -func FilterKeys(a map[string]string, keys []string) map[string]string { +func FilterKeys[K comparable, V any](a map[K]V, keys []K) map[K]V { if a == nil { return nil } - result := make(map[string]string) + result := make(map[K]V) for _, key := range keys { if val, exists := a[key]; exists { result[key] = val diff --git a/internal/common/util/map_test.go b/internal/common/util/map_test.go index 1c9f0b44f2d..0a385df742c 100644 --- a/internal/common/util/map_test.go +++ b/internal/common/util/map_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "golang.org/x/exp/maps" ) func TestMergeMaps_AllValuesPresent(t *testing.T) { @@ -51,25 +52,25 @@ func TestMergeMaps_Nil(t *testing.T) { } assert.Equal(t, MergeMaps(map1, nil), map1) assert.Equal(t, MergeMaps(nil, map1), map1) - assert.Equal(t, MergeMaps(nil, nil), map[string]string{}) + assert.Equal(t, MergeMaps[string, string](nil, nil), map[string]string{}) } func TestEqual(t *testing.T) { map1 := map[string]string{ "a": "value1", } - map2 := DeepCopy(map1) + map2 := maps.Clone(map1) assert.True(t, Equal(map1, map2)) - map3 := DeepCopy(map1) + map3 := maps.Clone(map1) map3["a"] = "value2" assert.False(t, Equal(map1, map3)) - map4 := 
DeepCopy(map1) + map4 := maps.Clone(map1) delete(map4, "a") assert.False(t, Equal(map1, map4)) - map5 := DeepCopy(map1) + map5 := maps.Clone(map1) map5["b"] = "value2" assert.False(t, Equal(map1, map5)) } @@ -80,7 +81,7 @@ func TestEqual_Nil(t *testing.T) { } assert.False(t, Equal(map1, nil)) assert.False(t, Equal(nil, map1)) - assert.True(t, Equal(nil, nil)) + assert.True(t, Equal[string, string](nil, nil)) } func Test_FilterKeys(t *testing.T) { @@ -94,7 +95,7 @@ func Test_FilterKeys(t *testing.T) { } func Test_FilterKeys_Nil(t *testing.T) { - assert.Nil(t, FilterKeys(nil, nil)) - assert.Nil(t, FilterKeys(nil, []string{})) + assert.Nil(t, FilterKeys[string, string](nil, nil)) + assert.Nil(t, FilterKeys[string, string](nil, []string{})) assert.Equal(t, map[string]string{}, FilterKeys(map[string]string{"a": "b"}, nil)) } diff --git a/internal/executor/application.go b/internal/executor/application.go index e5407ccea21..e26b6ec57ff 100644 --- a/internal/executor/application.go +++ b/internal/executor/application.go @@ -14,7 +14,7 @@ import ( "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/cluster" @@ -124,7 +124,7 @@ func StartUpWithContext( taskManager *task.BackgroundTaskManager, wg *sync.WaitGroup, ) (func(), *sync.WaitGroup) { - nodeInfoService := node.NewKubernetesNodeInfoService(clusterContext, config.Kubernetes.ToleratedTaints) + nodeInfoService := node.NewKubernetesNodeInfoService(clusterContext, config.Kubernetes.NodeTypeLabel, config.Kubernetes.ToleratedTaints) podUtilisationService := utilisation.NewPodUtilisationService( clusterContext, nodeInfoService, @@ -144,7 +144,11 @@ func StartUpWithContext( stopExecutorApiComponents := setupExecutorApiComponents(log, config, clusterContext, clusterHealthMonitor, taskManager, pendingPodChecker, nodeInfoService, podUtilisationService) - resourceCleanupService := service.NewResourceCleanupService(clusterContext, config.Kubernetes) + resourceCleanupService, err := service.NewResourceCleanupService(clusterContext, config.Kubernetes) + if err != nil { + log.Errorf("Error creating resource cleanup service: %s", err) + os.Exit(-1) + } taskManager.Register(resourceCleanupService.CleanupResources, config.Task.ResourceCleanupInterval, "resource_cleanup") if config.Metric.ExposeQueueUsageMetrics { @@ -192,12 +196,16 @@ func setupExecutorApiComponents( config.Kubernetes.MinimumResourcesMarkedAllocatedToNonArmadaPodsPerNodePriority, ) - eventReporter, stopReporter := reporter.NewJobEventReporter( + eventReporter, stopReporter, err := reporter.NewJobEventReporter( clusterContext, jobRunState, eventSender, clock.RealClock{}, 200) + if err != nil { + log.Errorf("Failed to create job event reporter: %s", err) + os.Exit(-1) + } submitter := job.NewSubmitter( clusterContext, @@ -227,7 +235,7 @@ func setupExecutorApiComponents( submitter, clusterHealthMonitor, ) - podIssueService := service.NewIssueHandler( + podIssueService, err := service.NewIssueHandler( jobRunState, clusterContext, eventReporter, @@ -235,6 +243,10 @@ func setupExecutorApiComponents( pendingPodChecker, config.Kubernetes.StuckTerminatingPodExpiry, ) + if err != nil { + log.Errorf("Failed to create pod issue service: %s", err) + os.Exit(-1) + } taskManager.Register(podIssueService.HandlePodIssues, config.Task.PodIssueHandlingInterval, "pod_issue_handling") 
 	taskManager.Register(preemptRunProcessor.Run, config.Task.StateProcessorInterval, "preempt_runs")
@@ -242,16 +254,24 @@ func setupExecutorApiComponents(
 	taskManager.Register(jobRequester.RequestJobsRuns, config.Task.AllocateSpareClusterCapacityInterval, "request_runs")
 	taskManager.Register(clusterAllocationService.AllocateSpareClusterCapacity, config.Task.AllocateSpareClusterCapacityInterval, "submit_runs")
 	taskManager.Register(eventReporter.ReportMissingJobEvents, config.Task.MissingJobEventReconciliationInterval, "event_reconciliation")
-	pod_metrics.ExposeClusterContextMetrics(clusterContext, clusterUtilisationService, podUtilisationService, nodeInfoService)
+	_, err = pod_metrics.ExposeClusterContextMetrics(clusterContext, clusterUtilisationService, podUtilisationService, nodeInfoService)
+	if err != nil {
+		log.Errorf("Failed to setup cluster context metrics: %s", err)
+		os.Exit(-1)
+	}
 	runStateMetricsCollector := runstate.NewJobRunStateStoreMetricsCollector(jobRunState)
 	prometheus.MustRegister(runStateMetricsCollector)
 
 	if config.Metric.ExposeQueueUsageMetrics && config.Task.UtilisationEventReportingInterval > 0 {
-		podUtilisationReporter := utilisation.NewUtilisationEventReporter(
+		podUtilisationReporter, err := utilisation.NewUtilisationEventReporter(
 			clusterContext,
 			podUtilisationService,
 			eventReporter,
 			config.Task.UtilisationEventReportingInterval)
+		if err != nil {
+			log.Errorf("Failed to create pod utilisation reporter: %s", err)
+			os.Exit(-1)
+		}
 		taskManager.Register(
 			podUtilisationReporter.ReportUtilisationEvents,
 			config.Task.UtilisationEventProcessingInterval,
diff --git a/internal/executor/configuration/types.go b/internal/executor/configuration/types.go
index 11a21f02813..49b9c524a89 100644
--- a/internal/executor/configuration/types.go
+++ b/internal/executor/configuration/types.go
@@ -56,6 +56,7 @@ type KubernetesConfiguration struct {
 	QPS                    float32
 	Burst                  int
 	Etcd                   EtcdConfiguration
+	NodeTypeLabel          string
 	NodeIdLabel            string
 	TrackedNodeLabels      []string
 	AvoidNodeLabelsOnRetry []string
diff --git a/internal/executor/context/cluster_context.go b/internal/executor/context/cluster_context.go
index 0d71efa3593..53773658e45 100644
--- a/internal/executor/context/cluster_context.go
+++ b/internal/executor/context/cluster_context.go
@@ -15,7 +15,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/informers"
 	informer "k8s.io/client-go/informers/core/v1"
 	discovery_informer "k8s.io/client-go/informers/discovery/v1"
@@ -23,6 +22,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubelet/pkg/apis/stats/v1alpha1"
+	"k8s.io/utils/clock"
 	"k8s.io/utils/pointer"
 
 	"github.com/armadaproject/armada/internal/common/armadacontext"
@@ -43,7 +43,7 @@ type ClusterIdentity interface {
 
 type ClusterContext interface {
 	ClusterIdentity
-	AddPodEventHandler(handler cache.ResourceEventHandlerFuncs)
+	AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) (cache.ResourceEventHandlerRegistration, error)
 	GetBatchPods() ([]*v1.Pod, error)
 	GetAllPods() ([]*v1.Pod, error)
 	GetActiveBatchPods() ([]*v1.Pod, error)
@@ -124,7 +124,7 @@ func NewClusterContext(
 		clock: clock.RealClock{},
 	}
 
-	context.AddPodEventHandler(cache.ResourceEventHandlerFuncs{
+	_, err := context.AddPodEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			pod, ok := obj.(*v1.Pod)
 			if !ok {
@@ -134,6 +134,9 @@
context.submittedPods.Delete(util.ExtractPodKey(pod)) }, }) + if err != nil { + panic(err) + } // Use node informer so it is initialised properly context.nodeInformer.Lister() @@ -141,7 +144,7 @@ func NewClusterContext( context.ingressInformer.Lister() context.endpointSliceInformer.Lister() - err := context.eventInformer.Informer().AddIndexers(cache.Indexers{podByUIDIndex: indexPodByUID}) + err = context.eventInformer.Informer().AddIndexers(cache.Indexers{podByUIDIndex: indexPodByUID}) if err != nil { panic(err) } @@ -160,8 +163,8 @@ func indexPodByUID(obj interface{}) (strings []string, err error) { return []string{string(event.InvolvedObject.UID)}, nil } -func (c *KubernetesClusterContext) AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) { - c.podInformer.Informer().AddEventHandler(handler) +func (c *KubernetesClusterContext) AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) (cache.ResourceEventHandlerRegistration, error) { + return c.podInformer.Informer().AddEventHandler(handler) } func (c *KubernetesClusterContext) Stop() { diff --git a/internal/executor/context/cluster_context_test.go b/internal/executor/context/cluster_context_test.go index b382cd0e690..ad3b0a08b54 100644 --- a/internal/executor/context/cluster_context_test.go +++ b/internal/executor/context/cluster_context_test.go @@ -15,11 +15,11 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/rest" clientTesting "k8s.io/client-go/testing" + clock "k8s.io/utils/clock/testing" "k8s.io/utils/pointer" "github.com/armadaproject/armada/internal/common/armadacontext" diff --git a/internal/executor/context/fake/sync_cluster_context.go b/internal/executor/context/fake/sync_cluster_context.go index bf663430241..c966aed357d 100644 --- a/internal/executor/context/fake/sync_cluster_context.go +++ b/internal/executor/context/fake/sync_cluster_context.go @@ -13,23 +13,30 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/domain" + util2 "github.com/armadaproject/armada/internal/executor/util" ) type SyncFakeClusterContext struct { Pods map[string]*v1.Pod + Events map[string][]*v1.Event AnnotationsAdded map[string]map[string]string podEventHandlers []*cache.ResourceEventHandlerFuncs } func NewSyncFakeClusterContext() *SyncFakeClusterContext { - c := &SyncFakeClusterContext{Pods: map[string]*v1.Pod{}, AnnotationsAdded: map[string]map[string]string{}} + c := &SyncFakeClusterContext{ + Pods: map[string]*v1.Pod{}, + Events: map[string][]*v1.Event{}, + AnnotationsAdded: map[string]map[string]string{}, + } return c } func (*SyncFakeClusterContext) Stop() {} -func (c *SyncFakeClusterContext) AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) { +func (c *SyncFakeClusterContext) AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) (cache.ResourceEventHandlerRegistration, error) { c.podEventHandlers = append(c.podEventHandlers, &handler) + return nil, nil } func (c *SyncFakeClusterContext) GetBatchPods() ([]*v1.Pod, error) { @@ -57,7 +64,8 @@ func (c *SyncFakeClusterContext) GetNode(nodeName string) (*v1.Node, error) { } func (c *SyncFakeClusterContext) GetPodEvents(pod *v1.Pod) ([]*v1.Event, error) { - return []*v1.Event{}, nil + jobId := util2.ExtractJobId(pod) + return c.Events[jobId], nil } func (c 
*SyncFakeClusterContext) SubmitService(service *v1.Service) (*v1.Service, error) { diff --git a/internal/executor/fake/application.go b/internal/executor/fake/application.go index c2d28362c1f..8f26b43405f 100644 --- a/internal/executor/fake/application.go +++ b/internal/executor/fake/application.go @@ -16,7 +16,7 @@ func StartUp(config configuration.ExecutorConfiguration, nodes []*context.NodeSp wg := &sync.WaitGroup{} wg.Add(1) return executor.StartUpWithContext( - logrus.NewEntry(logrus.New()), + logrus.NewEntry(logrus.StandardLogger()), config, context.NewFakeClusterContext(config.Application, config.Kubernetes.NodeIdLabel, nodes), nil, diff --git a/internal/executor/fake/context/context.go b/internal/executor/fake/context/context.go index f15c68b5d43..e4ec04a0c9e 100644 --- a/internal/executor/fake/context/context.go +++ b/internal/executor/fake/context/context.go @@ -13,6 +13,7 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1" networking "k8s.io/api/networking/v1" @@ -24,7 +25,6 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" armadaresource "github.com/armadaproject/armada/internal/common/resource" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" cluster_context "github.com/armadaproject/armada/internal/executor/context" ) @@ -89,8 +89,9 @@ func NewFakeClusterContext(appConfig configuration.ApplicationConfiguration, nod func (*FakeClusterContext) Stop() { } -func (c *FakeClusterContext) AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) { +func (c *FakeClusterContext) AddPodEventHandler(handler cache.ResourceEventHandlerFuncs) (cache.ResourceEventHandlerRegistration, error) { c.podEventHandlers = append(c.podEventHandlers, &handler) + return nil, nil } func (c *FakeClusterContext) GetBatchPods() ([]*v1.Pod, error) { @@ -302,7 +303,7 @@ func (c *FakeClusterContext) addNodes(specs []*NodeSpec) { for _, s := range specs { for i := 0; i < s.Count; i++ { name := c.clusterId + "-" + s.Name + "-" + strconv.Itoa(i) - labels := util.DeepCopy(s.Labels) + labels := maps.Clone(s.Labels) if labels == nil { labels = map[string]string{} } diff --git a/internal/executor/job/job_run_state_store.go b/internal/executor/job/job_run_state_store.go index 49fa498fa45..dfe648fd7eb 100644 --- a/internal/executor/job/job_run_state_store.go +++ b/internal/executor/job/job_run_state_store.go @@ -40,7 +40,7 @@ func NewJobRunStateStore(clusterContext context.ClusterContext) *JobRunStateStor clusterContext: clusterContext, } - clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { @@ -53,9 +53,12 @@ func NewJobRunStateStore(clusterContext context.ClusterContext) *JobRunStateStor } }, }) + if err != nil { + panic(err) + } // On start up, make sure our state matches current k8s state - err := stateStore.initialiseStateFromKubernetes() + err = stateStore.initialiseStateFromKubernetes() if err != nil { panic(err) } diff --git a/internal/executor/job/processors/preempt_runs.go b/internal/executor/job/processors/preempt_runs.go index fada566c8ee..9d98a73cfde 100644 --- a/internal/executor/job/processors/preempt_runs.go +++ b/internal/executor/job/processors/preempt_runs.go @@ -78,7 +78,7 @@ func (j *RunPreemptedProcessor) 
reportPodPreempted(run *job.RunState, pod *v1.Po if err != nil { return fmt.Errorf("failed creating preempted event because - %s", err) } - failedEvent, err := reporter.CreateSimpleJobFailedEvent(pod, "Run preempted", j.clusterContext.GetClusterId(), armadaevents.KubernetesReason_AppError) + failedEvent, err := reporter.CreateSimpleJobFailedEvent(pod, "Run preempted", "", j.clusterContext.GetClusterId(), armadaevents.KubernetesReason_AppError) if err != nil { return fmt.Errorf("failed creating failed event because - %s", err) } @@ -96,7 +96,6 @@ func (j *RunPreemptedProcessor) reportPodPreempted(run *job.RunState, pod *v1.Po domain.JobPreemptedAnnotation: time.Now().String(), string(v1.PodFailed): time.Now().String(), }) - if err != nil { return fmt.Errorf("failed to annotate pod as preempted - %s", err) } diff --git a/internal/executor/metrics/pod_metrics/cluster_context.go b/internal/executor/metrics/pod_metrics/cluster_context.go index 116fdc63c6b..e86881411b1 100644 --- a/internal/executor/metrics/pod_metrics/cluster_context.go +++ b/internal/executor/metrics/pod_metrics/cluster_context.go @@ -79,7 +79,7 @@ func ExposeClusterContextMetrics( utilisationService utilisation.UtilisationService, queueUtilisationService utilisation.PodUtilisationService, nodeInfoService node.NodeInfoService, -) *ClusterContextMetrics { +) (*ClusterContextMetrics, error) { m := &ClusterContextMetrics{ context: context, utilisationService: utilisationService, @@ -94,7 +94,7 @@ func ExposeClusterContextMetrics( []string{queueLabel, phaseLabel}), } - context.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := context.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { @@ -111,8 +111,11 @@ func ExposeClusterContextMetrics( m.reportPhase(newPod) }, }) + if err != nil { + return nil, err + } prometheus.MustRegister(m) - return m + return m, nil } func (m *ClusterContextMetrics) reportPhase(pod *v1.Pod) { @@ -217,15 +220,15 @@ func (m *ClusterContextMetrics) Collect(metrics chan<- prometheus.Metric) { } for _, nodeGroup := range nodeGroupAllocationInfos { - metrics <- prometheus.MustNewConstMetric(nodeCountDesc, prometheus.GaugeValue, float64(len(nodeGroup.Nodes)), nodeGroup.NodeType.Id) + metrics <- prometheus.MustNewConstMetric(nodeCountDesc, prometheus.GaugeValue, float64(len(nodeGroup.Nodes)), nodeGroup.NodeType) for resourceType, allocatable := range nodeGroup.NodeGroupAllocatableCapacity { metrics <- prometheus.MustNewConstMetric(nodeAvailableResourceDesc, prometheus.GaugeValue, armadaresource.QuantityAsFloat64(allocatable), resourceType, - nodeGroup.NodeType.Id) + nodeGroup.NodeType) } for resourceType, total := range nodeGroup.NodeGroupCapacity { - metrics <- prometheus.MustNewConstMetric(nodeTotalResourceDesc, prometheus.GaugeValue, armadaresource.QuantityAsFloat64(total), resourceType, nodeGroup.NodeType.Id) + metrics <- prometheus.MustNewConstMetric(nodeTotalResourceDesc, prometheus.GaugeValue, armadaresource.QuantityAsFloat64(total), resourceType, nodeGroup.NodeType) } } } @@ -263,7 +266,7 @@ func (m *ClusterContextMetrics) setEmptyMetrics(podMetrics map[string]map[string func (m *ClusterContextMetrics) createNodeTypeLookup(nodes []*v1.Node) map[string]string { result := map[string]string{} for _, n := range nodes { - result[n.Name] = m.nodeInfoService.GetType(n).Id + result[n.Name] = m.nodeInfoService.GetType(n) } return result } diff --git a/internal/executor/node/node_group.go b/internal/executor/node/node_group.go index 
7a22d30f84e..fd0429b16ab 100644 --- a/internal/executor/node/node_group.go +++ b/internal/executor/node/node_group.go @@ -11,33 +11,32 @@ import ( util2 "github.com/armadaproject/armada/internal/executor/util" ) +const defaultNodeType = "none" + type NodeInfoService interface { IsAvailableProcessingNode(*v1.Node) bool GetAllAvailableProcessingNodes() ([]*v1.Node, error) GetAllNodes() ([]*v1.Node, error) GroupNodesByType(nodes []*v1.Node) []*NodeGroup - GetType(node *v1.Node) *NodeTypeIdentifier + GetType(node *v1.Node) string } type KubernetesNodeInfoService struct { clusterContext context.ClusterContext + nodeTypeLabel string toleratedTaints map[string]bool } -func NewKubernetesNodeInfoService(clusterContext context.ClusterContext, toleratedTaints []string) *KubernetesNodeInfoService { +func NewKubernetesNodeInfoService(clusterContext context.ClusterContext, nodeTypeLabel string, toleratedTaints []string) *KubernetesNodeInfoService { return &KubernetesNodeInfoService{ clusterContext: clusterContext, + nodeTypeLabel: nodeTypeLabel, toleratedTaints: util.StringListToSet(toleratedTaints), } } -type NodeTypeIdentifier struct { - Id string - Taints []v1.Taint -} - type NodeGroup struct { - NodeType *NodeTypeIdentifier + NodeType string Nodes []*v1.Node } @@ -46,13 +45,13 @@ func (kubernetesNodeInfoService *KubernetesNodeInfoService) GroupNodesByType(nod for _, node := range nodes { nodeType := kubernetesNodeInfoService.GetType(node) - if _, present := nodeGroupMap[nodeType.Id]; !present { - nodeGroupMap[nodeType.Id] = &NodeGroup{ + if _, present := nodeGroupMap[nodeType]; !present { + nodeGroupMap[nodeType] = &NodeGroup{ NodeType: nodeType, Nodes: []*v1.Node{}, } } - nodeGroupMap[nodeType.Id].Nodes = append(nodeGroupMap[nodeType.Id].Nodes, node) + nodeGroupMap[nodeType].Nodes = append(nodeGroupMap[nodeType].Nodes, node) } nodeGroups := make([]*NodeGroup, 0, len(nodeGroupMap)) @@ -63,17 +62,19 @@ func (kubernetesNodeInfoService *KubernetesNodeInfoService) GroupNodesByType(nod return nodeGroups } -func (kubernetesNodeInfoService *KubernetesNodeInfoService) GetType(node *v1.Node) *NodeTypeIdentifier { - groupId := kubernetesNodeInfoService.clusterContext.GetClusterPool() - relevantTaints := kubernetesNodeInfoService.filterToleratedTaints(node.Spec.Taints) - if len(relevantTaints) > 0 { - groupId = nodeGroupId(relevantTaints) - } +func (kubernetesNodeInfoService *KubernetesNodeInfoService) GetType(node *v1.Node) string { + nodeType := defaultNodeType - return &NodeTypeIdentifier{ - Id: groupId, - Taints: relevantTaints, + if labelValue, ok := node.Labels[kubernetesNodeInfoService.nodeTypeLabel]; ok { + nodeType = labelValue + } else { + relevantTaints := kubernetesNodeInfoService.filterToleratedTaints(node.Spec.Taints) + if len(relevantTaints) > 0 { + nodeType = nodeGroupId(relevantTaints) + } } + + return nodeType } func (kubernetesNodeInfoService *KubernetesNodeInfoService) filterToleratedTaints(taints []v1.Taint) []v1.Taint { diff --git a/internal/executor/node/node_group_test.go b/internal/executor/node/node_group_test.go index d67330be949..4bfcc785116 100644 --- a/internal/executor/node/node_group_test.go +++ b/internal/executor/node/node_group_test.go @@ -11,59 +11,65 @@ import ( fakeContext "github.com/armadaproject/armada/internal/executor/fake/context" ) -var testAppConfig = configuration.ApplicationConfiguration{ClusterId: "test", Pool: "pool"} +var ( + testAppConfig = configuration.ApplicationConfiguration{ClusterId: "test", Pool: "pool"} + nodeTypeLabel = "node-type" +) func 
TestGetType_WhenNodeHasNoTaint(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{"tolerated1", "tolerated2"}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"tolerated1", "tolerated2"}) node := createNodeWithTaints("node1") result := nodeInfoService.GetType(node) - assert.Equal(t, result.Id, context.GetClusterPool()) - assert.Equal(t, len(result.Taints), 0) + assert.Equal(t, result, defaultNodeType) +} + +func TestGetType_WhenNodeHasNodeTypeLabel(t *testing.T) { + context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"tolerated1", "tolerated2"}) + + node := createNodeWithTaints("node1", "tolerated1") + node.Labels = map[string]string{nodeTypeLabel: "example-node-type"} + + result := nodeInfoService.GetType(node) + assert.Equal(t, result, "example-node-type") } func TestGetType_WhenNodeHasUntoleratedTaint(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{"tolerated1", "tolerated2"}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"tolerated1", "tolerated2"}) node := createNodeWithTaints("node1", "untolerated") result := nodeInfoService.GetType(node) - assert.Equal(t, result.Id, context.GetClusterPool()) - assert.Equal(t, len(result.Taints), 0) + assert.Equal(t, result, defaultNodeType) } func TestGetType_WhenNodeHasToleratedTaint(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{"tolerated1", "tolerated2"}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"tolerated1", "tolerated2"}) node := createNodeWithTaints("node1", "tolerated1") result := nodeInfoService.GetType(node) - assert.Equal(t, result.Id, "tolerated1") - assert.Equal(t, len(result.Taints), 1) - assert.Equal(t, result.Taints, node.Spec.Taints) + assert.Equal(t, result, "tolerated1") node = createNodeWithTaints("node1", "tolerated1", "tolerated2") result = nodeInfoService.GetType(node) - assert.Equal(t, result.Id, "tolerated1,tolerated2") - assert.Equal(t, len(result.Taints), 2) - assert.Equal(t, result.Taints, node.Spec.Taints) + assert.Equal(t, result, "tolerated1,tolerated2") } func TestGetType_WhenSomeNodeTaintsTolerated(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{"tolerated1", "tolerated2"}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"tolerated1", "tolerated2"}) node := createNodeWithTaints("node1", "tolerated1", "untolerated") result := nodeInfoService.GetType(node) - assert.Equal(t, result.Id, "tolerated1") - assert.Equal(t, len(result.Taints), 1) - assert.Equal(t, result.Taints[0], node.Spec.Taints[0]) + assert.Equal(t, result, "tolerated1") } func TestGroupNodesByType(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{"tolerated1", "tolerated2"}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"tolerated1", 
"tolerated2"}) node1 := createNodeWithTaints("node1") node2 := createNodeWithTaints("node2", "untolerated") @@ -75,13 +81,13 @@ func TestGroupNodesByType(t *testing.T) { assert.Equal(t, len(groupedNodes), 3) expected := map[string][]*v1.Node{ - context.GetClusterPool(): {node1, node2}, - "tolerated1": {node3, node4}, - "tolerated1,tolerated2": {node5}, + defaultNodeType: {node1, node2}, + "tolerated1": {node3, node4}, + "tolerated1,tolerated2": {node5}, } for _, nodeGroup := range groupedNodes { - expectedGroup, present := expected[nodeGroup.NodeType.Id] + expectedGroup, present := expected[nodeGroup.NodeType] assert.True(t, present) assert.Equal(t, expectedGroup, nodeGroup.Nodes) } @@ -89,7 +95,7 @@ func TestGroupNodesByType(t *testing.T) { func TestFilterAvailableProcessingNodes(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{}) node := v1.Node{ Spec: v1.NodeSpec{ @@ -104,7 +110,7 @@ func TestFilterAvailableProcessingNodes(t *testing.T) { func TestIsAvailableProcessingNode_IsFalse_UnschedulableNode(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{}) node := v1.Node{ Spec: v1.NodeSpec{ @@ -119,7 +125,7 @@ func TestIsAvailableProcessingNode_IsFalse_UnschedulableNode(t *testing.T) { func TestFilterAvailableProcessingNodes_IsFalse_NodeWithNoScheduleTaint(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{}) taint := v1.Taint{ Key: "taint", @@ -138,7 +144,7 @@ func TestFilterAvailableProcessingNodes_IsFalse_NodeWithNoScheduleTaint(t *testi func TestFilterAvailableProcessingNodes_IsTrue_NodeWithToleratedTaint(t *testing.T) { context := fakeContext.NewFakeClusterContext(testAppConfig, "kubernetes.io/hostname", nil) - nodeInfoService := NewKubernetesNodeInfoService(context, []string{"taint"}) + nodeInfoService := NewKubernetesNodeInfoService(context, nodeTypeLabel, []string{"taint"}) taint := v1.Taint{ Key: "taint", diff --git a/internal/executor/reporter/event.go b/internal/executor/reporter/event.go index 55d36e4f237..f60c3450166 100644 --- a/internal/executor/reporter/event.go +++ b/internal/executor/reporter/event.go @@ -29,8 +29,10 @@ func CreateEventForCurrentState(pod *v1.Pod, clusterId string) (*armadaevents.Ev Created: &now, Event: &armadaevents.EventSequence_Event_JobRunAssigned{ JobRunAssigned: &armadaevents.JobRunAssigned{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), ResourceInfos: []*armadaevents.KubernetesResourceInfo{ { ObjectMeta: &armadaevents.ObjectMeta{ @@ -55,8 +57,10 @@ func CreateEventForCurrentState(pod *v1.Pod, clusterId string) (*armadaevents.Ev Created: &now, Event: &armadaevents.EventSequence_Event_JobRunRunning{ JobRunRunning: &armadaevents.JobRunRunning{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: 
armadaevents.MustUlidStringFromProtoUuid(jobId), ResourceInfos: []*armadaevents.KubernetesResourceInfo{ { ObjectMeta: &armadaevents.ObjectMeta{ @@ -82,6 +86,7 @@ func CreateEventForCurrentState(pod *v1.Pod, clusterId string) (*armadaevents.Ev pod, util.ExtractPodFailedReason(pod), util.ExtractPodFailureCause(pod), + "", util.ExtractFailedPodContainerStatuses(pod, clusterId), clusterId) case v1.PodSucceeded: @@ -89,8 +94,10 @@ func CreateEventForCurrentState(pod *v1.Pod, clusterId string) (*armadaevents.Ev Created: &now, Event: &armadaevents.EventSequence_Event_JobRunSucceeded{ JobRunSucceeded: &armadaevents.JobRunSucceeded{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), ResourceInfos: []*armadaevents.KubernetesResourceInfo{ { ObjectMeta: &armadaevents.ObjectMeta{ @@ -137,8 +144,10 @@ func CreateJobUnableToScheduleEvent(pod *v1.Pod, reason string, clusterId string Created: &now, Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), Errors: []*armadaevents.Error{ { Terminal: false, // EventMessage_UnableToSchedule indicates an issue with job to start up - info only @@ -203,8 +212,10 @@ func CreateJobIngressInfoEvent(pod *v1.Pod, clusterId string, associatedServices Created: &now, Event: &armadaevents.EventSequence_Event_StandaloneIngressInfo{ StandaloneIngressInfo: &armadaevents.StandaloneIngressInfo{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), ObjectMeta: &armadaevents.ObjectMeta{ KubernetesId: string(pod.ObjectMeta.UID), Namespace: pod.Namespace, @@ -233,19 +244,21 @@ func CreateSimpleJobPreemptedEvent(pod *v1.Pod) (*armadaevents.EventSequence, er Created: &now, Event: &armadaevents.EventSequence_Event_JobRunPreempted{ JobRunPreempted: &armadaevents.JobRunPreempted{ - PreemptedJobId: preemptedJobId, - PreemptedRunId: preemptedRunId, + PreemptedJobId: preemptedJobId, + PreemptedJobIdStr: armadaevents.MustUlidStringFromProtoUuid(preemptedJobId), + PreemptedRunId: preemptedRunId, + PreemptedRunIdStr: armadaevents.MustUuidStringFromProtoUuid(preemptedRunId), }, }, }) return sequence, nil } -func CreateSimpleJobFailedEvent(pod *v1.Pod, reason string, clusterId string, cause armadaevents.KubernetesReason) (*armadaevents.EventSequence, error) { - return CreateJobFailedEvent(pod, reason, cause, []*armadaevents.ContainerError{}, clusterId) +func CreateSimpleJobFailedEvent(pod *v1.Pod, reason string, debugMessage string, clusterId string, cause armadaevents.KubernetesReason) (*armadaevents.EventSequence, error) { + return CreateJobFailedEvent(pod, reason, cause, debugMessage, []*armadaevents.ContainerError{}, clusterId) } -func CreateJobFailedEvent(pod *v1.Pod, reason string, cause armadaevents.KubernetesReason, +func CreateJobFailedEvent(pod *v1.Pod, reason string, cause armadaevents.KubernetesReason, debugMessage string, containerStatuses []*armadaevents.ContainerError, clusterId string, ) (*armadaevents.EventSequence, error) { sequence := createEmptySequence(pod) @@ -259,8 +272,10 @@ func CreateJobFailedEvent(pod *v1.Pod, reason string, cause armadaevents.Kuberne Created: &now, Event: 
&armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), Errors: []*armadaevents.Error{ { Terminal: true, @@ -277,6 +292,7 @@ func CreateJobFailedEvent(pod *v1.Pod, reason string, cause armadaevents.Kuberne PodNumber: getPodNumber(pod), ContainerErrors: containerStatuses, KubernetesReason: cause, + DebugMessage: debugMessage, }, }, }, @@ -307,8 +323,10 @@ func CreateMinimalJobFailedEvent(jobIdStr string, runIdStr string, jobSet string Created: &now, Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: runIdStr, + JobId: jobId, + JobIdStr: jobIdStr, Errors: []*armadaevents.Error{ { Terminal: true, @@ -331,7 +349,7 @@ func CreateMinimalJobFailedEvent(jobIdStr string, runIdStr string, jobSet string return sequence, nil } -func CreateReturnLeaseEvent(pod *v1.Pod, reason string, clusterId string, runAttempted bool) (*armadaevents.EventSequence, error) { +func CreateReturnLeaseEvent(pod *v1.Pod, reason string, debugMessage string, clusterId string, runAttempted bool) (*armadaevents.EventSequence, error) { sequence := createEmptySequence(pod) jobId, runId, err := extractIds(pod) if err != nil { @@ -343,8 +361,10 @@ func CreateReturnLeaseEvent(pod *v1.Pod, reason string, clusterId string, runAtt Created: &now, Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), Errors: []*armadaevents.Error{ { Terminal: true, // EventMessage_LeaseReturned indicates a pod could not be scheduled. 
@@ -359,6 +379,7 @@ func CreateReturnLeaseEvent(pod *v1.Pod, reason string, clusterId string, runAtt PodNumber: getPodNumber(pod), Message: reason, RunAttempted: runAttempted, + DebugMessage: debugMessage, }, }, }, @@ -381,8 +402,10 @@ func CreateJobUtilisationEvent(pod *v1.Pod, utilisationData *domain.UtilisationD Created: &now, Event: &armadaevents.EventSequence_Event_ResourceUtilisation{ ResourceUtilisation: &armadaevents.ResourceUtilisation{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), ResourceInfo: &armadaevents.KubernetesResourceInfo{ ObjectMeta: &armadaevents.ObjectMeta{ KubernetesId: string(pod.ObjectMeta.UID), diff --git a/internal/executor/reporter/job_event_reporter.go b/internal/executor/reporter/job_event_reporter.go index 8bfb465ca83..002b2d8d833 100644 --- a/internal/executor/reporter/job_event_reporter.go +++ b/internal/executor/reporter/job_event_reporter.go @@ -6,8 +6,8 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" + "k8s.io/utils/clock" clusterContext "github.com/armadaproject/armada/internal/executor/context" domain2 "github.com/armadaproject/armada/internal/executor/domain" @@ -33,7 +33,7 @@ type JobEventReporter struct { jobRunStateStore *job.JobRunStateStore clusterContext clusterContext.ClusterContext - clock clock.Clock + clock clock.WithTicker maxBatchSize int } @@ -41,9 +41,9 @@ func NewJobEventReporter( clusterContext clusterContext.ClusterContext, jobRunState *job.JobRunStateStore, eventSender EventSender, - clock clock.Clock, + clock clock.WithTicker, maxBatchSize int, -) (*JobEventReporter, chan bool) { +) (*JobEventReporter, chan bool, error) { stop := make(chan bool) reporter := &JobEventReporter{ eventSender: eventSender, @@ -56,11 +56,14 @@ func NewJobEventReporter( maxBatchSize: maxBatchSize, } - clusterContext.AddPodEventHandler(reporter.podEventHandler()) + _, err := clusterContext.AddPodEventHandler(reporter.podEventHandler()) + if err != nil { + return nil, nil, err + } go reporter.processEventQueue(stop) - return reporter, stop + return reporter, stop, nil } func (eventReporter *JobEventReporter) podEventHandler() cache.ResourceEventHandlerFuncs { diff --git a/internal/executor/reporter/job_event_reporter_test.go b/internal/executor/reporter/job_event_reporter_test.go index 7d6a8423a00..d90076a24b9 100644 --- a/internal/executor/reporter/job_event_reporter_test.go +++ b/internal/executor/reporter/job_event_reporter_test.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" util2 "github.com/armadaproject/armada/internal/common/util" fakecontext "github.com/armadaproject/armada/internal/executor/context/fake" @@ -48,7 +48,8 @@ func TestRequiresIngressToBeReported_TrueWhenHasIngressButNotIngressReportedAnno } func TestJobEventReporter_SendsEventImmediately_OnceNumberOfWaitingEventsMatchesBatchSize(t *testing.T) { - jobEventReporter, eventSender, _ := setupBatchEventsTest(2) + jobEventReporter, eventSender, _, err := setupBatchEventsTest(2) + require.NoError(t, err) pod1 := createPod(1) pod2 := createPod(2) @@ -66,7 +67,8 @@ func TestJobEventReporter_SendsEventImmediately_OnceNumberOfWaitingEventsMatches } func 
TestJobEventReporter_SendsAllEventsInBuffer_EachBatchTickInterval(t *testing.T) { - jobEventReporter, eventSender, testClock := setupBatchEventsTest(2) + jobEventReporter, eventSender, testClock, err := setupBatchEventsTest(2) + require.NoError(t, err) pod1 := createPod(1) jobEventReporter.QueueEvent(EventMessage{createFailedEvent(t, pod1), util.ExtractJobRunId(pod1)}, func(err error) {}) @@ -83,18 +85,18 @@ func TestJobEventReporter_SendsAllEventsInBuffer_EachBatchTickInterval(t *testin } func createFailedEvent(t *testing.T, pod *v1.Pod) *armadaevents.EventSequence { - event, err := CreateSimpleJobFailedEvent(pod, "failed", "cluster1", armadaevents.KubernetesReason_AppError) + event, err := CreateSimpleJobFailedEvent(pod, "failed", "", "cluster1", armadaevents.KubernetesReason_AppError) require.NoError(t, err) return event } -func setupBatchEventsTest(batchSize int) (*JobEventReporter, *FakeEventSender, *clock.FakeClock) { +func setupBatchEventsTest(batchSize int) (*JobEventReporter, *FakeEventSender, *clock.FakeClock, error) { executorContext := fakecontext.NewSyncFakeClusterContext() eventSender := NewFakeEventSender() jobRunState := job.NewJobRunStateStore(executorContext) testClock := clock.NewFakeClock(time.Now()) - jobEventReporter, _ := NewJobEventReporter(executorContext, jobRunState, eventSender, testClock, batchSize) - return jobEventReporter, eventSender, testClock + jobEventReporter, _, err := NewJobEventReporter(executorContext, jobRunState, eventSender, testClock, batchSize) + return jobEventReporter, eventSender, testClock, err } func createPod(index int) *v1.Pod { diff --git a/internal/executor/service/cluster_allocation.go b/internal/executor/service/cluster_allocation.go index 19136d010d9..405a7308128 100644 --- a/internal/executor/service/cluster_allocation.go +++ b/internal/executor/service/cluster_allocation.go @@ -48,9 +48,8 @@ func NewClusterAllocationService( func (allocationService *ClusterAllocationService) AllocateSpareClusterCapacity() { // If a health monitor is provided, avoid leasing jobs when the cluster is unhealthy. 
if allocationService.clusterHealthMonitor != nil { - log := logrus.NewEntry(logrus.New()) if ok, reason, err := allocationService.clusterHealthMonitor.IsHealthy(); err != nil { - logging.WithStacktrace(log, err).Error("failed to check cluster health") + logging.WithStacktrace(logrus.NewEntry(logrus.StandardLogger()), err).Error("failed to check cluster health") return } else if !ok { log.Warnf("cluster is not healthy; will not request more jobs: %s", reason) @@ -120,7 +119,7 @@ func (allocationService *ClusterAllocationService) processFailedJobSubmissions(f } func (allocationService *ClusterAllocationService) sendReturnLeaseEvent(details *job.FailedSubmissionDetails, message string) error { - returnLeaseEvent, err := reporter.CreateReturnLeaseEvent(details.Pod, message, allocationService.clusterId.GetClusterId(), true) + returnLeaseEvent, err := reporter.CreateReturnLeaseEvent(details.Pod, message, "", allocationService.clusterId.GetClusterId(), true) if err != nil { return fmt.Errorf("failed to create return lease event %s", err) } @@ -128,7 +127,7 @@ func (allocationService *ClusterAllocationService) sendReturnLeaseEvent(details } func (allocationService *ClusterAllocationService) sendFailedEvent(details *job.FailedSubmissionDetails, message string) error { - failEvent, err := reporter.CreateSimpleJobFailedEvent(details.Pod, message, allocationService.clusterId.GetClusterId(), armadaevents.KubernetesReason_AppError) + failEvent, err := reporter.CreateSimpleJobFailedEvent(details.Pod, message, "", allocationService.clusterId.GetClusterId(), armadaevents.KubernetesReason_AppError) if err != nil { return fmt.Errorf("failed to create return lease event %s", err) } diff --git a/internal/executor/service/job_requester.go b/internal/executor/service/job_requester.go index 0d8896072ea..0e5a09be52b 100644 --- a/internal/executor/service/job_requester.go +++ b/internal/executor/service/job_requester.go @@ -9,7 +9,6 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/slices" - util2 "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" executorContext "github.com/armadaproject/armada/internal/executor/context" "github.com/armadaproject/armada/internal/executor/job" @@ -115,7 +114,7 @@ func (r *JobRequester) getUnassignedRunIds(capacityReport *utilisation.ClusterAv // We make the assumption here that JobRunStateStore knows about all job runs and don't reconcile again against kubernetes // This should be a safe assumption - and would be a bug if it was ever not true allJobRuns := r.jobRunStateStore.GetAll() - allJobRunIds = append(allJobRunIds, util2.Map(allJobRuns, func(val *job.RunState) string { + allJobRunIds = append(allJobRunIds, slices.Map(allJobRuns, func(val *job.RunState) string { return val.Meta.RunId })...) 
diff --git a/internal/executor/service/pod_issue_handler.go b/internal/executor/service/pod_issue_handler.go index 7800fe6d0ce..0c8b48ea99e 100644 --- a/internal/executor/service/pod_issue_handler.go +++ b/internal/executor/service/pod_issue_handler.go @@ -1,14 +1,16 @@ package service import ( + "bytes" "fmt" "sync" "time" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" + "k8s.io/kubectl/pkg/describe" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/executor/configuration" @@ -35,6 +37,7 @@ type podIssue struct { // A copy of the pod when an issue was detected OriginalPodState *v1.Pod Message string + DebugMessage string Retryable bool DeletionRequested bool Type podIssueType @@ -81,7 +84,7 @@ func NewIssueHandler( stateChecksConfig configuration.StateChecksConfiguration, pendingPodChecker podchecks.PodChecker, stuckTerminatingPodExpiry time.Duration, -) *IssueHandler { +) (*IssueHandler, error) { issueHandler := &IssueHandler{ jobRunState: jobRunState, clusterContext: clusterContext, @@ -94,7 +97,7 @@ func NewIssueHandler( clock: clock.RealClock{}, } - clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ DeleteFunc: func(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { @@ -104,8 +107,11 @@ func NewIssueHandler( issueHandler.handleDeletedPod(pod) }, }) + if err != nil { + return nil, err + } - return issueHandler + return issueHandler, nil } func (p *IssueHandler) hasIssue(runId string) bool { @@ -216,6 +222,7 @@ func (p *IssueHandler) detectPodIssues(allManagedPods []*v1.Pod) { if action != podchecks.ActionWait { retryable := action == podchecks.ActionRetry message := createStuckPodMessage(retryable, podCheckMessage) + debugMessage := createDebugMessage(podEvents) podIssueType := StuckStartingUp if cause == podchecks.NoNodeAssigned { podIssueType = UnableToSchedule @@ -226,6 +233,7 @@ func (p *IssueHandler) detectPodIssues(allManagedPods []*v1.Pod) { issue := &podIssue{ OriginalPodState: pod.DeepCopy(), Message: message, + DebugMessage: debugMessage, Retryable: retryable, Type: podIssueType, } @@ -239,6 +247,20 @@ func (p *IssueHandler) detectPodIssues(allManagedPods []*v1.Pod) { } } +func createDebugMessage(podEvents []*v1.Event) string { + events := make([]v1.Event, 0, len(podEvents)) + for _, e := range podEvents { + events = append(events, *e) + } + + eventList := v1.EventList{Items: events} + writer := bytes.Buffer{} + prefixWriter := describe.NewPrefixWriter(&writer) + + describe.DescribeEvents(&eventList, prefixWriter) + return writer.String() +} + // Returns true if the pod has been running longer than its activeDeadlineSeconds + grace period func (p *IssueHandler) hasExceededActiveDeadline(pod *v1.Pod) bool { if pod.Spec.ActiveDeadlineSeconds == nil { @@ -326,18 +348,18 @@ func (p *IssueHandler) handlePodIssue(issue *issue) { func (p *IssueHandler) handleNonRetryableJobIssue(issue *issue) { if !issue.RunIssue.Reported { log.Infof("Handling non-retryable issue detected for job %s run %s", issue.RunIssue.JobId, issue.RunIssue.RunId) - message := issue.RunIssue.PodIssue.Message + podIssue := issue.RunIssue.PodIssue events := make([]reporter.EventMessage, 0, 2) - if issue.RunIssue.PodIssue.Type == StuckStartingUp || issue.RunIssue.PodIssue.Type == UnableToSchedule { - unableToScheduleEvent, err := 
reporter.CreateJobUnableToScheduleEvent(issue.RunIssue.PodIssue.OriginalPodState, message, p.clusterContext.GetClusterId()) + if podIssue.Type == StuckStartingUp || podIssue.Type == UnableToSchedule { + unableToScheduleEvent, err := reporter.CreateJobUnableToScheduleEvent(podIssue.OriginalPodState, podIssue.Message, p.clusterContext.GetClusterId()) if err != nil { log.Errorf("Failed to create unable to schedule event for job %s because %s", issue.RunIssue.JobId, err) return } events = append(events, reporter.EventMessage{Event: unableToScheduleEvent, JobRunId: issue.RunIssue.RunId}) } - failedEvent, err := reporter.CreateSimpleJobFailedEvent(issue.RunIssue.PodIssue.OriginalPodState, message, p.clusterContext.GetClusterId(), issue.RunIssue.PodIssue.Cause) + failedEvent, err := reporter.CreateSimpleJobFailedEvent(podIssue.OriginalPodState, podIssue.Message, podIssue.DebugMessage, p.clusterContext.GetClusterId(), podIssue.Cause) if err != nil { log.Errorf("Failed to create failed event for job %s because %s", issue.RunIssue.JobId, err) return @@ -419,7 +441,14 @@ func (p *IssueHandler) handleRetryableJobIssue(issue *issue) { // When we have our own internal state - we don't need to wait for the pod deletion to complete // We can just mark is to delete in our state and return the lease jobRunAttempted := issue.RunIssue.PodIssue.Type != UnableToSchedule - returnLeaseEvent, err := reporter.CreateReturnLeaseEvent(issue.RunIssue.PodIssue.OriginalPodState, issue.RunIssue.PodIssue.Message, p.clusterContext.GetClusterId(), jobRunAttempted) + + returnLeaseEvent, err := reporter.CreateReturnLeaseEvent( + issue.RunIssue.PodIssue.OriginalPodState, + issue.RunIssue.PodIssue.Message, + issue.RunIssue.PodIssue.DebugMessage, + p.clusterContext.GetClusterId(), + jobRunAttempted, + ) if err != nil { log.Errorf("Failed to create return lease event for job %s because %s", issue.RunIssue.JobId, err) return diff --git a/internal/executor/service/pod_issue_handler_test.go b/internal/executor/service/pod_issue_handler_test.go index 2a754497b1c..dd18064fe57 100644 --- a/internal/executor/service/pod_issue_handler_test.go +++ b/internal/executor/service/pod_issue_handler_test.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" commonutil "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/configuration" @@ -28,7 +28,8 @@ import ( ) func TestPodIssueService_DoesNothingIfNoPodsAreFound(t *testing.T) { - podIssueService, _, _, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, _, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) podIssueService.HandlePodIssues() @@ -36,7 +37,8 @@ func TestPodIssueService_DoesNothingIfNoPodsAreFound(t *testing.T) { } func TestPodIssueService_DoesNothingIfNoStuckPodsAreFound(t *testing.T) { - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) runningPod := makeRunningPod() addPod(t, fakeClusterContext, runningPod) @@ -48,9 +50,11 @@ func TestPodIssueService_DoesNothingIfNoStuckPodsAreFound(t *testing.T) { } func TestPodIssueService_DeletesPodAndReportsFailed_IfStuckAndUnretryable(t *testing.T) { - podIssueService, _, fakeClusterContext, 
eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) unretryableStuckPod := makeUnretryableStuckPod() addPod(t, fakeClusterContext, unretryableStuckPod) + addPodEvents(fakeClusterContext, unretryableStuckPod, []*v1.Event{{Message: "Image pull has failed", Type: "Warning"}}) podIssueService.HandlePodIssues() @@ -69,10 +73,12 @@ func TestPodIssueService_DeletesPodAndReportsFailed_IfStuckAndUnretryable(t *tes assert.True(t, ok) assert.Len(t, failedEvent.JobRunErrors.Errors, 1) assert.Contains(t, failedEvent.JobRunErrors.Errors[0].GetPodError().Message, "unrecoverable problem") + assert.Contains(t, failedEvent.JobRunErrors.Errors[0].GetPodError().DebugMessage, "Image pull has failed") } func TestPodIssueService_DeletesPodAndReportsFailed_IfStuckTerminating(t *testing.T) { - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) terminatingPod := makeTerminatingPod() addPod(t, fakeClusterContext, terminatingPod) @@ -114,7 +120,8 @@ func TestPodIssueService_DeletesPodAndReportsFailed_IfExceedsActiveDeadline(t *t for name, tc := range tests { t.Run(name, func(t *testing.T) { - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) addPod(t, fakeClusterContext, tc.pod) podIssueService.HandlePodIssues() @@ -138,9 +145,11 @@ func TestPodIssueService_DeletesPodAndReportsFailed_IfExceedsActiveDeadline(t *t } func TestPodIssueService_DeletesPodAndReportsLeaseReturned_IfRetryableStuckPod(t *testing.T) { - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) retryableStuckPod := makeRetryableStuckPod() addPod(t, fakeClusterContext, retryableStuckPod) + addPodEvents(fakeClusterContext, retryableStuckPod, []*v1.Event{{Message: "Some other message", Type: "Warning"}}) podIssueService.HandlePodIssues() @@ -165,10 +174,12 @@ func TestPodIssueService_DeletesPodAndReportsLeaseReturned_IfRetryableStuckPod(t assert.True(t, ok) assert.Len(t, returnedEvent.JobRunErrors.Errors, 1) assert.True(t, returnedEvent.JobRunErrors.Errors[0].GetPodLeaseReturned() != nil) + assert.Contains(t, returnedEvent.JobRunErrors.Errors[0].GetPodLeaseReturned().DebugMessage, "Some other message") } func TestPodIssueService_DeletesPodAndReportsFailed_IfRetryableStuckPodStartsUpAfterDeletionCalled(t *testing.T) { - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{}) + require.NoError(t, err) retryableStuckPod := makeRetryableStuckPod() addPod(t, fakeClusterContext, retryableStuckPod) @@ -205,7 +216,8 @@ func TestPodIssueService_DeletesPodAndReportsFailed_IfRetryableStuckPodStartsUpA } func TestPodIssueService_ReportsFailed_IfDeletedExternally(t *testing.T) { - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{}) + podIssueService, _, fakeClusterContext, eventsReporter, err := 
setupTestComponents([]*job.RunState{}) + require.NoError(t, err) runningPod := makeRunningPod() protoJobId, err := armadaevents.ProtoUuidFromUlidString(util.ExtractJobId(runningPod)) require.NoError(t, err) @@ -230,7 +242,8 @@ func TestPodIssueService_ReportsFailed_IfPodOfActiveRunGoesMissing(t *testing.T) protoJobId, err := armadaevents.ProtoUuidFromUlidString(jobId) require.NoError(t, err) - podIssueService, _, _, eventsReporter := setupTestComponents([]*job.RunState{createRunState(jobId, uuid.New().String(), job.Active)}) + podIssueService, _, _, eventsReporter, err := setupTestComponents([]*job.RunState{createRunState(jobId, uuid.New().String(), job.Active)}) + require.NoError(t, err) podIssueService.clock = fakeClock podIssueService.HandlePodIssues() @@ -254,7 +267,8 @@ func TestPodIssueService_DoesNothing_IfMissingPodOfActiveRunReturns(t *testing.T fakeClock := clock.NewFakeClock(baseTime) runningPod := makeRunningPod() runState := createRunState(util.ExtractJobId(runningPod), util.ExtractJobRunId(runningPod), job.Active) - podIssueService, _, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{runState}) + podIssueService, _, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{runState}) + require.NoError(t, err) podIssueService.clock = fakeClock podIssueService.HandlePodIssues() @@ -270,7 +284,8 @@ func TestPodIssueService_DoesNothing_IfMissingPodOfActiveRunReturns(t *testing.T func TestPodIssueService_DeleteRunFromRunState_IfSubmittedPodNeverAppears(t *testing.T) { baseTime := time.Now() fakeClock := clock.NewFakeClock(baseTime) - podIssueService, runStateStore, _, eventsReporter := setupTestComponents([]*job.RunState{createRunState("job-1", "run-1", job.SuccessfulSubmission)}) + podIssueService, runStateStore, _, eventsReporter, err := setupTestComponents([]*job.RunState{createRunState("job-1", "run-1", job.SuccessfulSubmission)}) + require.NoError(t, err) podIssueService.clock = fakeClock podIssueService.HandlePodIssues() @@ -290,7 +305,8 @@ func TestPodIssueService_DoesNothing_IfSubmittedPodAppears(t *testing.T) { fakeClock := clock.NewFakeClock(baseTime) runningPod := makeRunningPod() runState := createRunState(util.ExtractJobId(runningPod), util.ExtractJobRunId(runningPod), job.SuccessfulSubmission) - podIssueService, runStateStore, fakeClusterContext, eventsReporter := setupTestComponents([]*job.RunState{runState}) + podIssueService, runStateStore, fakeClusterContext, eventsReporter, err := setupTestComponents([]*job.RunState{runState}) + require.NoError(t, err) podIssueService.clock = fakeClock podIssueService.HandlePodIssues() @@ -304,7 +320,7 @@ func TestPodIssueService_DoesNothing_IfSubmittedPodAppears(t *testing.T) { assert.Len(t, runStateStore.GetAll(), 1) } -func setupTestComponents(initialRunState []*job.RunState) (*IssueHandler, *job.JobRunStateStore, *fakecontext.SyncFakeClusterContext, *mocks.FakeEventReporter) { +func setupTestComponents(initialRunState []*job.RunState) (*IssueHandler, *job.JobRunStateStore, *fakecontext.SyncFakeClusterContext, *mocks.FakeEventReporter, error) { fakeClusterContext := fakecontext.NewSyncFakeClusterContext() eventReporter := mocks.NewFakeEventReporter() pendingPodChecker := makePodChecker() @@ -314,7 +330,7 @@ func setupTestComponents(initialRunState []*job.RunState) (*IssueHandler, *job.J DeadlineForActivePodConsideredMissing: time.Minute * 5, } - podIssueHandler := NewIssueHandler( + podIssueHandler, err := NewIssueHandler( runStateStore, fakeClusterContext, eventReporter, @@ 
-323,7 +339,7 @@ func setupTestComponents(initialRunState []*job.RunState) (*IssueHandler, *job.J time.Minute*3, ) - return podIssueHandler, runStateStore, fakeClusterContext, eventReporter + return podIssueHandler, runStateStore, fakeClusterContext, eventReporter, err } func createRunState(jobId string, runId string, phase job.RunPhase) *job.RunState { @@ -447,3 +463,7 @@ func addPod(t *testing.T, fakeClusterContext context.ClusterContext, runningPod t.Error(err) } } + +func addPodEvents(fakeClusterContext *fakecontext.SyncFakeClusterContext, pod *v1.Pod, events []*v1.Event) { + fakeClusterContext.Events[util.ExtractJobId(pod)] = events +} diff --git a/internal/executor/service/resource_cleanup.go b/internal/executor/service/resource_cleanup.go index e948b37b461..fa432fed2ec 100644 --- a/internal/executor/service/resource_cleanup.go +++ b/internal/executor/service/resource_cleanup.go @@ -21,7 +21,7 @@ type ResourceCleanupService struct { func NewResourceCleanupService( clusterContext clusterContext.ClusterContext, kubernetesConfiguration configuration.KubernetesConfiguration, -) *ResourceCleanupService { +) (*ResourceCleanupService, error) { service := &ResourceCleanupService{ clusterContext: clusterContext, kubernetesConfiguration: kubernetesConfiguration, @@ -37,7 +37,7 @@ func NewResourceCleanupService( So in the case the cleanup below fails, the ownerreference will ensure it is cleaned up when the pod is */ - clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { pod, ok := newObj.(*v1.Pod) if !ok { @@ -49,8 +49,11 @@ func NewResourceCleanupService( } }, }) + if err != nil { + return nil, err + } - return service + return service, nil } func (i *ResourceCleanupService) removeAnyAssociatedIngress(pod *v1.Pod) { diff --git a/internal/executor/service/resource_cleanup_test.go b/internal/executor/service/resource_cleanup_test.go index b228a7e5c3f..bd93e4e1478 100644 --- a/internal/executor/service/resource_cleanup_test.go +++ b/internal/executor/service/resource_cleanup_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -18,7 +19,8 @@ import ( ) func TestCleanUpResources_RemovesExpiredResources(t *testing.T) { - s := createResourceCleanupService(time.Second, time.Second, 10) + s, err := createResourceCleanupService(time.Second, time.Second, 10) + require.NoError(t, err) now := time.Now() succeededExpiredPod := makeFinishedPodWithTimestamp(v1.PodSucceeded, now.Add(-1*time.Minute)) @@ -33,7 +35,8 @@ func TestCleanUpResources_RemovesExpiredResources(t *testing.T) { } func TestCleanUpResources_LeavesNonExpiredPods(t *testing.T) { - s := createResourceCleanupService(time.Minute*5, time.Minute*5, 10) + s, err := createResourceCleanupService(time.Minute*5, time.Minute*5, 10) + require.NoError(t, err) now := time.Now() succeededNonExpiredPod := makeFinishedPodWithTimestamp(v1.PodSucceeded, now.Add(-1*time.Minute)) @@ -50,7 +53,8 @@ func TestCleanUpResources_LeavesNonExpiredPods(t *testing.T) { } func TestCleanUpResources_RemovesNonExpiredPodsOverMaxTerminatedPodLimit(t *testing.T) { - s := createResourceCleanupService(time.Minute*5, time.Minute*5, 1) + s, err := createResourceCleanupService(time.Minute*5, time.Minute*5, 1) + require.NoError(t, err) now := time.Now() succeededNonExpiredPod := 
makeFinishedPodWithTimestamp(v1.PodSucceeded, now.Add(-1*time.Minute)) @@ -66,7 +70,8 @@ func TestCleanUpResources_RemovesNonExpiredPodsOverMaxTerminatedPodLimit(t *test } func TestCanBeRemovedConditions(t *testing.T) { - s := createResourceCleanupService(time.Second, time.Second, 1) + s, err := createResourceCleanupService(time.Second, time.Second, 1) + require.NoError(t, err) pods := map[*v1.Pod]bool{ // should not be cleaned yet makePodWithCurrentStateReported(v1.PodRunning, false): false, @@ -85,7 +90,8 @@ func TestCanBeRemovedConditions(t *testing.T) { } func TestCanBeRemovedMinimumPodTime(t *testing.T) { - s := createResourceCleanupService(5*time.Minute, 10*time.Minute, 1) + s, err := createResourceCleanupService(5*time.Minute, 10*time.Minute, 1) + require.NoError(t, err) now := time.Now() pods := map[*v1.Pod]bool{ // should not be cleaned yet @@ -228,7 +234,7 @@ func addPods(t *testing.T, clusterContext clusterContext.ClusterContext, pods .. } } -func createResourceCleanupService(minimumPodAge, failedPodExpiry time.Duration, maxTerminatedPods int) *ResourceCleanupService { +func createResourceCleanupService(minimumPodAge, failedPodExpiry time.Duration, maxTerminatedPods int) (*ResourceCleanupService, error) { fakeClusterContext := fake.NewSyncFakeClusterContext() kubernetesConfig := configuration.KubernetesConfiguration{ MinimumPodAge: minimumPodAge, diff --git a/internal/executor/util/event_util.go b/internal/executor/util/event_util.go deleted file mode 100644 index 126adc65ce6..00000000000 --- a/internal/executor/util/event_util.go +++ /dev/null @@ -1,11 +0,0 @@ -package util - -import v1 "k8s.io/api/core/v1" - -const ( - EventReasonPreempted = "Preempted" -) - -func IsPreemptedEvent(event *v1.Event) bool { - return event.Reason == EventReasonPreempted -} diff --git a/internal/executor/util/ingress_service_config.go b/internal/executor/util/ingress_service_config.go index 632ca83b49b..2f72f7147d6 100644 --- a/internal/executor/util/ingress_service_config.go +++ b/internal/executor/util/ingress_service_config.go @@ -1,7 +1,9 @@ package util import ( - "github.com/armadaproject/armada/internal/common/util" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + "github.com/armadaproject/armada/pkg/api" ) @@ -29,8 +31,8 @@ type IngressServiceConfig struct { func deepCopy(config *IngressServiceConfig) *IngressServiceConfig { return &IngressServiceConfig{ Type: config.Type, - Ports: util.DeepCopyListUint32(config.Ports), - Annotations: util.DeepCopy(config.Annotations), + Ports: slices.Clone(config.Ports), + Annotations: maps.Clone(config.Annotations), TlsEnabled: config.TlsEnabled, CertName: config.CertName, UseClusterIp: config.UseClusterIp, @@ -45,8 +47,8 @@ func CombineIngressService(ingresses []*api.IngressConfig, services []*api.Servi result, &IngressServiceConfig{ Type: Ingress, - Ports: util.DeepCopyListUint32(ing.Ports), - Annotations: util.DeepCopy(ing.Annotations), + Ports: slices.Clone(ing.Ports), + Annotations: maps.Clone(ing.Annotations), TlsEnabled: ing.TlsEnabled, CertName: ing.CertName, UseClusterIp: ing.UseClusterIP, @@ -65,7 +67,7 @@ func CombineIngressService(ingresses []*api.IngressConfig, services []*api.Servi result, &IngressServiceConfig{ Type: svcType, - Ports: util.DeepCopyListUint32(svc.Ports), + Ports: slices.Clone(svc.Ports), UseClusterIp: useClusterIP, }, ) diff --git a/internal/executor/util/job_util.go b/internal/executor/util/job_util.go deleted file mode 100644 index f101f4845e2..00000000000 --- a/internal/executor/util/job_util.go +++ /dev/null @@ 
-1,52 +0,0 @@ -package util - -import ( - "regexp" - - "github.com/pkg/errors" -) - -var ( - armadaJobRegex = regexp.MustCompile(`armada-([a-z0-9]+)-0`) - preemptedMessageRegex = regexp.MustCompile(`Preempted by ([-a-zA-Z0-9]+)/([-a-zA-Z0-9]+) on node ([-a-zA-Z0-9]+)`) - invalidJobIdFormat = "invalid name format: expected 'armada--0(-)', received '%s'" - invalidPreemptionMessageFormat = "invalid preemption message: " + - "expected 'Preempted by ([-a-zA-Z0-9]+)/([-a-zA-Z0-9]+) on node ([-a-zA-Z0-9]+)', received '%s'" -) - -type PreemptiveJobInfo struct { - Namespace string - Name string - Node string -} - -// ParsePreemptionMessage parses the message field from a Preempted Cluster Event -// Message format is 'Preempted by / on node ' -func ParsePreemptionMessage(msg string) (*PreemptiveJobInfo, error) { - res := preemptedMessageRegex.FindAllStringSubmatch(msg, -1) - if len(res) != 1 || len(res[0]) != 4 { - return nil, errors.Errorf( - invalidPreemptionMessageFormat, - msg, - ) - } - - info := &PreemptiveJobInfo{ - Namespace: res[0][1], - Name: res[0][2], - Node: res[0][3], - } - - return info, nil -} - -// ExtractJobIdFromName extracts job id from the Armada Job pod -// Pods are named using the convention armada--0(-) -func ExtractJobIdFromName(name string) (string, error) { - res := armadaJobRegex.FindAllStringSubmatch(name, -1) - if len(res) != 1 || len(res[0]) != 2 { - return "", errors.Errorf(invalidJobIdFormat, name) - } - - return res[0][1], nil -} diff --git a/internal/executor/util/job_util_test.go b/internal/executor/util/job_util_test.go deleted file mode 100644 index 03f824721c8..00000000000 --- a/internal/executor/util/job_util_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package util - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestExtractJobIdFromName(t *testing.T) { - validInput := "armada-01gd3fet6mf1g9km91ajhg208g-0" - expected1 := "01gd3fet6mf1g9km91ajhg208g" - extracted, err := ExtractJobIdFromName(validInput) - assert.Nil(t, err) - assert.Equal(t, extracted, expected1) - - invalidInput := "armada-2131fdsf-fesgs3-0" - extracted, err = ExtractJobIdFromName(invalidInput) - assert.Empty(t, extracted) - assert.EqualError(t, err, "invalid name format: expected 'armada--0(-)', received 'armada-2131fdsf-fesgs3-0'") - - invalidInput = "really-bad-input" - extracted, err = ExtractJobIdFromName(invalidInput) - assert.Empty(t, extracted) - assert.EqualError(t, err, "invalid name format: expected 'armada--0(-)', received 'really-bad-input'") -} - -func TestParsePreemptionMessage(t *testing.T) { - validInput := "Preempted by some-namespace-1/armada-feuwet4nui43nfekjng-0 on node test-node" - expected := &PreemptiveJobInfo{ - Namespace: "some-namespace-1", - Name: "armada-feuwet4nui43nfekjng-0", - Node: "test-node", - } - extracted, err := ParsePreemptionMessage(validInput) - assert.Nil(t, err) - assert.Equal(t, extracted, expected) - - invalidInput := "Preempted by armada-feuwet4nui43nfekjng-0 on node test-node" - extracted, err = ParsePreemptionMessage(invalidInput) - assert.Nil(t, extracted) - assert.EqualError( - t, - err, - "invalid preemption message: expected 'Preempted by ([-a-zA-Z0-9]+)/([-a-zA-Z0-9]+) on node ([-a-zA-Z0-9]+)', "+ - "received 'Preempted by armada-feuwet4nui43nfekjng-0 on node test-node'", - ) - - invalidInput = "really bad input" - extracted, err = ParsePreemptionMessage(invalidInput) - assert.Nil(t, extracted) - assert.EqualError( - t, - err, - "invalid preemption message: expected 'Preempted by ([-a-zA-Z0-9]+)/([-a-zA-Z0-9]+) on node 
([-a-zA-Z0-9]+)', "+ - "received 'really bad input'", - ) -} diff --git a/internal/executor/util/pod_util.go b/internal/executor/util/pod_util.go index 05baa8573c6..c41a626418e 100644 --- a/internal/executor/util/pod_util.go +++ b/internal/executor/util/pod_util.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "strconv" - "strings" "time" log "github.com/sirupsen/logrus" @@ -12,7 +11,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" - "github.com/armadaproject/armada/internal/common" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/executor/domain" ) @@ -304,10 +302,6 @@ func HasCurrentStateBeenReported(pod *v1.Pod) bool { return annotationPresent } -func IsArmadaJobPod(name string) bool { - return strings.HasPrefix(name, common.PodNamePrefix) -} - func CountPodsByPhase(pods []*v1.Pod) map[string]uint32 { pods = RemoveDuplicates(pods) result := map[string]uint32{} diff --git a/internal/executor/utilisation/cluster_utilisation.go b/internal/executor/utilisation/cluster_utilisation.go index faa24593251..ca2b57ea666 100644 --- a/internal/executor/utilisation/cluster_utilisation.go +++ b/internal/executor/utilisation/cluster_utilisation.go @@ -54,7 +54,7 @@ func NewClusterUtilisationService( } type NodeGroupAllocationInfo struct { - NodeType *node.NodeTypeIdentifier + NodeType string Nodes []*v1.Node NodeGroupCapacity armadaresource.ComputeResources NodeGroupAllocatableCapacity armadaresource.ComputeResources @@ -135,7 +135,7 @@ func (cls *ClusterUtilisationService) GetAvailableClusterCapacity() (*ClusterAva NonArmadaAllocatedResources: nodeNonArmadaAllocatedResources, Unschedulable: !isSchedulable, ResourceUsageByQueue: resourceUsageByQueue, - NodeType: cls.nodeInfoService.GetType(node).Id, + NodeType: cls.nodeInfoService.GetType(node), }) } @@ -261,7 +261,7 @@ func (clusterUtilisationService *ClusterUtilisationService) GetAllNodeGroupAlloc for _, nodeGroup := range nodeGroups { totalNodeResource := armadaresource.CalculateTotalResource(nodeGroup.Nodes) - allocatableNodeResource := allocatableResourceByNodeType[nodeGroup.NodeType.Id] + allocatableNodeResource := allocatableResourceByNodeType[nodeGroup.NodeType] cordonedNodeResource := getCordonedResource(nodeGroup.Nodes, batchPods) result = append(result, &NodeGroupAllocationInfo{ @@ -318,7 +318,7 @@ func (clusterUtilisationService *ClusterUtilisationService) getAllocatableResour totalNodeGroupResource := armadaresource.CalculateTotalResource(nodeGroup.Nodes) allocatableNodeGroupResource := totalNodeGroupResource.DeepCopy() allocatableNodeGroupResource.Sub(unmanagedPodResource) - result[nodeGroup.NodeType.Id] = allocatableNodeGroupResource + result[nodeGroup.NodeType] = allocatableNodeGroupResource } return result, nil diff --git a/internal/executor/utilisation/job_utilisation_reporter.go b/internal/executor/utilisation/job_utilisation_reporter.go index d844f1be6a7..d40ce0919ae 100644 --- a/internal/executor/utilisation/job_utilisation_reporter.go +++ b/internal/executor/utilisation/job_utilisation_reporter.go @@ -35,7 +35,7 @@ func NewUtilisationEventReporter( podUtilisation PodUtilisationService, eventReporter reporter.EventReporter, reportingPeriod time.Duration, -) *UtilisationEventReporter { +) (*UtilisationEventReporter, error) { r := &UtilisationEventReporter{ clusterContext: clusterContext, podUtilisation: podUtilisation, @@ -44,7 +44,7 @@ func NewUtilisationEventReporter( podInfo: map[string]*podUtilisationInfo{}, } - 
clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := clusterContext.AddPodEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { @@ -70,7 +70,10 @@ func NewUtilisationEventReporter( go r.deletePod(pod) }, }) - return r + if err != nil { + return nil, err + } + return r, nil } func (r *UtilisationEventReporter) ReportUtilisationEvents() { diff --git a/internal/executor/utilisation/job_utilisation_reporter_test.go b/internal/executor/utilisation/job_utilisation_reporter_test.go index 2e96ad63dfe..d41d4b31789 100644 --- a/internal/executor/utilisation/job_utilisation_reporter_test.go +++ b/internal/executor/utilisation/job_utilisation_reporter_test.go @@ -37,8 +37,9 @@ func TestUtilisationEventReporter_ReportUtilisationEvents(t *testing.T) { fakeEventReporter := &mocks.FakeEventReporter{} fakeUtilisationService := &fakePodUtilisationService{data: &testPodResources} - eventReporter := NewUtilisationEventReporter(clusterContext, fakeUtilisationService, fakeEventReporter, reportingPeriod) - _, err := submitPod(clusterContext) + eventReporter, err := NewUtilisationEventReporter(clusterContext, fakeUtilisationService, fakeEventReporter, reportingPeriod) + require.NoError(t, err) + _, err = submitPod(clusterContext) require.NoError(t, err) deadline := time.Now().Add(time.Second) @@ -76,8 +77,9 @@ func TestUtilisationEventReporter_ReportUtilisationEvents_WhenNoUtilisationData( fakeEventReporter := &mocks.FakeEventReporter{} fakeUtilisationService := &fakePodUtilisationService{data: domain.EmptyUtilisationData()} - eventReporter := NewUtilisationEventReporter(clusterContext, fakeUtilisationService, fakeEventReporter, reportingPeriod) - _, err := submitPod(clusterContext) + eventReporter, err := NewUtilisationEventReporter(clusterContext, fakeUtilisationService, fakeEventReporter, reportingPeriod) + require.NoError(t, err) + _, err = submitPod(clusterContext) require.NoError(t, err) deadline := time.Now().Add(time.Millisecond * 500) diff --git a/internal/executor/utilisation/pod_utilisation_custom_metrics.go b/internal/executor/utilisation/pod_utilisation_custom_metrics.go index 8af57400b74..dffaf5c25c5 100644 --- a/internal/executor/utilisation/pod_utilisation_custom_metrics.go +++ b/internal/executor/utilisation/pod_utilisation_custom_metrics.go @@ -8,7 +8,7 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/executor/configuration" clusterContext "github.com/armadaproject/armada/internal/executor/context" diff --git a/internal/jobservice/application.go b/internal/jobservice/application.go index 596a64ae9bd..2ae6be4a593 100644 --- a/internal/jobservice/application.go +++ b/internal/jobservice/application.go @@ -11,7 +11,7 @@ import ( "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "github.com/armadaproject/armada/internal/common/auth/authorization" + "github.com/armadaproject/armada/internal/common/auth" grpcCommon "github.com/armadaproject/armada/internal/common/grpc" grpcconfig "github.com/armadaproject/armada/internal/common/grpc/configuration" "github.com/armadaproject/armada/internal/common/grpc/grpcpool" @@ -110,7 +110,7 @@ func (a *App) StartUp(ctx context.Context, config *configuration.JobServiceConfi grpcServer := grpcCommon.CreateGrpcServer( config.Grpc.KeepaliveParams, config.Grpc.KeepaliveEnforcementPolicy, - 
[]authorization.AuthService{&authorization.AnonymousAuthService{}}, + []auth.AuthService{&auth.AnonymousAuthService{}}, config.Grpc.Tls, ) diff --git a/internal/jobservice/eventstojobs/event_job_response.go b/internal/jobservice/eventstojobs/event_job_response.go index 3559de47fec..543b3900bfe 100644 --- a/internal/jobservice/eventstojobs/event_job_response.go +++ b/internal/jobservice/eventstojobs/event_job_response.go @@ -11,8 +11,6 @@ func EventsToJobResponse(message api.EventMessage) *js.JobServiceResponse { switch message.Events.(type) { case *api.EventMessage_Submitted: return &js.JobServiceResponse{State: js.JobServiceResponse_SUBMITTED} - case *api.EventMessage_DuplicateFound: - return &js.JobServiceResponse{State: js.JobServiceResponse_DUPLICATE_FOUND} case *api.EventMessage_Running: return &js.JobServiceResponse{State: js.JobServiceResponse_RUNNING} case *api.EventMessage_Failed: @@ -29,7 +27,7 @@ func EventsToJobResponse(message api.EventMessage) *js.JobServiceResponse { // Check if api.EventMessage is terminal event func IsEventTerminal(message api.EventMessage) bool { switch message.Events.(type) { - case *api.EventMessage_DuplicateFound, *api.EventMessage_Cancelled, *api.EventMessage_Succeeded, *api.EventMessage_Failed: + case *api.EventMessage_Cancelled, *api.EventMessage_Succeeded, *api.EventMessage_Failed: return true default: return false diff --git a/internal/jobservice/eventstojobs/event_job_response_test.go b/internal/jobservice/eventstojobs/event_job_response_test.go index 267430e9996..7b857058d9a 100644 --- a/internal/jobservice/eventstojobs/event_job_response_test.go +++ b/internal/jobservice/eventstojobs/event_job_response_test.go @@ -25,10 +25,6 @@ func TestIsEventResponse(t *testing.T) { eventMessage: api.EventMessage{Events: &api.EventMessage_Submitted{}}, jobResponse: &jobservice.JobServiceResponse{State: jobservice.JobServiceResponse_SUBMITTED}, }, - { - eventMessage: api.EventMessage{Events: &api.EventMessage_DuplicateFound{}}, - jobResponse: &jobservice.JobServiceResponse{State: jobservice.JobServiceResponse_DUPLICATE_FOUND}, - }, { eventMessage: api.EventMessage{Events: &api.EventMessage_Running{}}, jobResponse: &jobservice.JobServiceResponse{State: jobservice.JobServiceResponse_RUNNING}, @@ -61,10 +57,6 @@ func TestIsEventResponse(t *testing.T) { eventMessage: api.EventMessage{Events: &api.EventMessage_IngressInfo{}}, jobResponse: nil, }, - { - eventMessage: api.EventMessage{Events: &api.EventMessage_Updated{}}, - jobResponse: nil, - }, { eventMessage: api.EventMessage{Events: &api.EventMessage_LeaseExpired{}}, jobResponse: nil, @@ -99,7 +91,7 @@ func TestIsEventResponse(t *testing.T) { }, } length := len(eventMessages) - assert.Equal(t, length, 19) + assert.Equal(t, length, 17) for i := range eventMessages { jobResponse := EventsToJobResponse(eventMessages[i].eventMessage) assert.Equal(t, jobResponse, eventMessages[i].jobResponse) @@ -112,10 +104,6 @@ func TestIsTerminalEvent(t *testing.T) { eventMessage: api.EventMessage{Events: &api.EventMessage_Submitted{}}, jobServiceEvent: false, }, - { - eventMessage: api.EventMessage{Events: &api.EventMessage_DuplicateFound{}}, - jobServiceEvent: true, - }, { eventMessage: api.EventMessage{Events: &api.EventMessage_Running{}}, jobServiceEvent: false, @@ -148,10 +136,6 @@ func TestIsTerminalEvent(t *testing.T) { eventMessage: api.EventMessage{Events: &api.EventMessage_IngressInfo{}}, jobServiceEvent: false, }, - { - eventMessage: api.EventMessage{Events: &api.EventMessage_Updated{}}, - jobServiceEvent: false, - }, { 
eventMessage: api.EventMessage{Events: &api.EventMessage_LeaseExpired{}}, jobServiceEvent: false, @@ -186,7 +170,7 @@ func TestIsTerminalEvent(t *testing.T) { }, } length := len(eventMessages) - assert.Equal(t, length, 19) + assert.Equal(t, length, 17) for i := range eventMessages { jobResponse := IsEventTerminal(eventMessages[i].eventMessage) assert.Equal(t, jobResponse, eventMessages[i].jobServiceEvent) diff --git a/internal/jobservice/eventstojobs/manage_subs.go b/internal/jobservice/eventstojobs/manage_subs.go index fa509e1adc3..81717e1cf1e 100644 --- a/internal/jobservice/eventstojobs/manage_subs.go +++ b/internal/jobservice/eventstojobs/manage_subs.go @@ -2,9 +2,9 @@ package eventstojobs import ( "context" + "errors" "fmt" "io" - "strings" "sync" "time" @@ -276,6 +276,8 @@ func (js *JobSetSubscription) Subscribe() error { log.WithFields(requestFields).Debugf("Called cancel") }() + shouldReconnect := false + // this loop will run until the context is canceled for { select { @@ -283,12 +285,29 @@ func (js *JobSetSubscription) Subscribe() error { log.WithFields(requestFields).Debug("context is done") return nil case <-nextRecv: + + if shouldReconnect { + stream, err = js.eventReader.GetJobEventMessage(js.ctx, &api.JobSetRequest{ + Id: js.JobSetId, + Queue: js.Queue, + Watch: true, + FromMessageId: js.fromMessageId, + }) + // Treat stream creation errors as terminal, like we do in Subscribe() + if err != nil { + log.WithFields(requestFields).WithError(err).Error("error from GetJobEventMessage") + return err + } + + shouldReconnect = false + } + msg, err := stream.Recv() if err != nil { - if strings.Contains(err.Error(), io.EOF.Error()) { + if err == io.EOF { log.WithFields(requestFields).Info("Reached stream end for JobSetSubscription") return nil - } else if strings.Contains(err.Error(), "context canceled") { + } else if errors.Is(err, context.Canceled) { // The select case will handle context being done/canceled. continue } @@ -300,6 +319,8 @@ func (js *JobSetSubscription) Subscribe() error { log.WithFields(requestFields).WithError(settingSubscribeErr).Error("could not set error field in job set table") } nextRecv = time.After(5 * time.Second) + // Consider this stream dead + shouldReconnect = true continue } diff --git a/internal/jobservice/eventstojobs/manage_subs_test.go b/internal/jobservice/eventstojobs/manage_subs_test.go index 7864ca2b4a1..77d1f842464 100644 --- a/internal/jobservice/eventstojobs/manage_subs_test.go +++ b/internal/jobservice/eventstojobs/manage_subs_test.go @@ -42,11 +42,7 @@ func (m *MockEventClient) Recv() (*api.EventStreamMessage, error) { return nil, io.EOF } - // We only want to return the error once. 
- defer func() { - m.err = nil - }() - + // Always return the error, they are terminal return msg, m.err } @@ -79,23 +75,23 @@ func TestJobSetSubscriptionSubscribe(t *testing.T) { name string isJobSetSubscribedFn func(context.Context, string, string) (bool, string, error) ttlSecs time.Duration - err error + eventClients []MockEventClient wantErr bool wantSubscriptionErr bool }{ { - name: "no error after expiration if messages are received", - ttlSecs: time.Second, - err: nil, + name: "no error after expiration if messages are received", + ttlSecs: time.Second, + eventClients: []MockEventClient{{}}, isJobSetSubscribedFn: func(context.Context, string, string) (bool, string, error) { return true, "", nil }, wantErr: false, }, { - name: "client errors and sets subscription error, but can continue on and exit normally", - ttlSecs: time.Second * 10, - err: errors.New("some error"), + name: "client errors and sets subscription error, reconnects to continue on and exit normally", + ttlSecs: time.Second * 10, + eventClients: []MockEventClient{{err: errors.New("some error")}, {}}, isJobSetSubscribedFn: func(context.Context, string, string) (bool, string, error) { return true, "", nil }, @@ -103,9 +99,9 @@ func TestJobSetSubscriptionSubscribe(t *testing.T) { wantSubscriptionErr: true, }, { - name: "it exits without error when job unsubscribes", - ttlSecs: time.Second, - err: nil, + name: "it exits without error when job unsubscribes", + ttlSecs: time.Second, + eventClients: []MockEventClient{{}}, isJobSetSubscribedFn: func(context.Context, string, string) (bool, string, error) { return false, "", nil }, @@ -117,15 +113,16 @@ func TestJobSetSubscriptionSubscribe(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - eventClient := MockEventClient{ - err: tt.err, + eventClientIndex := 0 + getEventClientFun := func(ctx context.Context, jobReq *api.JobSetRequest) (api.Event_GetJobSetEventsClient, error) { + eventClient := tt.eventClients[eventClientIndex] + eventClientIndex++ + return &eventClient, nil } mockJobEventReader := events.JobEventReaderMock{ - GetJobEventMessageFunc: func(context.Context, *api.JobSetRequest) (api.Event_GetJobSetEventsClient, error) { - return &eventClient, nil - }, - CloseFunc: func() {}, + GetJobEventMessageFunc: getEventClientFun, + CloseFunc: func() {}, } mockJobRepo := repository.SQLJobServiceMock{ @@ -169,7 +166,7 @@ func TestJobSetSubscriptionSubscribe(t *testing.T) { } if tt.wantSubscriptionErr { assert.True(t, len(mockJobRepo.SetSubscriptionErrorCalls()) > 0) - assert.Equal(t, 2, len(mockJobRepo.AddMessageIdAndClearSubscriptionErrorCalls())) + assert.Equal(t, 3, len(mockJobRepo.AddMessageIdAndClearSubscriptionErrorCalls())) } else { assert.Equal(t, 0, len(mockJobRepo.SetSubscriptionErrorCalls())) assert.True(t, len(mockJobRepo.AddMessageIdAndClearSubscriptionErrorCalls()) > 0) @@ -212,7 +209,7 @@ func TestJobSetSubscriptionExecutor(t *testing.T) { &mockJobEventReader, &mockJobRepo, jobSubChan, - time.Duration(time.Second), + time.Second, ) go executor.Manage() diff --git a/internal/lookout/ui/src/App.tsx b/internal/lookout/ui/src/App.tsx index 8e84fa7b66a..8615a369c24 100644 --- a/internal/lookout/ui/src/App.tsx +++ b/internal/lookout/ui/src/App.tsx @@ -5,7 +5,7 @@ import { createGenerateClassName } from "@material-ui/core/styles" import { ThemeProvider as ThemeProviderV5, createTheme as createThemeV5 } from "@mui/material/styles" import { JobsTableContainer } from "containers/lookoutV2/JobsTableContainer" import { SnackbarProvider } from 
"notistack" -import { UserManager, WebStorageStateStore, UserManagerSettings } from "oidc-client-ts" +import { UserManager, WebStorageStateStore, UserManagerSettings, User } from "oidc-client-ts" import { BrowserRouter, Navigate, Route, Routes, useNavigate } from "react-router-dom" import { IGetJobsService } from "services/lookoutV2/GetJobsService" import { IGroupJobsService } from "services/lookoutV2/GroupJobsService" @@ -18,7 +18,7 @@ import JobSetsContainer from "./containers/JobSetsContainer" import { UserManagerContext, useUserManager } from "./oidc" import { ICordonService } from "./services/lookoutV2/CordonService" import { IGetJobSpecService } from "./services/lookoutV2/GetJobSpecService" -import { IGetRunErrorService } from "./services/lookoutV2/GetRunErrorService" +import { IGetRunInfoService } from "./services/lookoutV2/GetRunInfoService" import { ILogService } from "./services/lookoutV2/LogService" import { CommandSpec } from "./utils" import { OidcConfig } from "./utils" @@ -68,7 +68,7 @@ type AppProps = { oidcConfig?: OidcConfig v2GetJobsService: IGetJobsService v2GroupJobsService: IGroupJobsService - v2RunErrorService: IGetRunErrorService + v2RunInfoService: IGetRunInfoService v2JobSpecService: IGetJobSpecService v2LogService: ILogService v2UpdateJobsService: UpdateJobsService @@ -147,6 +147,7 @@ export function createUserManager(config: OidcConfig): UserManager { redirect_uri: `${window.location.origin}/oidc`, scope: config.scope, userStore: new WebStorageStateStore({ store: window.localStorage }), + loadUserInfo: true, } return new UserManager(userManagerSettings) @@ -159,11 +160,18 @@ const V2Redirect = withRouter(({ router }) => (undefined) const [isAuthenticated, setIsAuthenticated] = useState(false) + const [username, setUsername] = useState(undefined) useEffect(() => { if (!userManager && props.oidcConfig) { const userManagerInstance = createUserManager(props.oidcConfig) setUserManager(userManagerInstance) + + userManagerInstance.getUser().then((user: User | null) => { + if (user) { + setUsername(user.profile.sub) + } + }) } }, [props.oidcConfig]) @@ -186,7 +194,7 @@ export function App(props: AppProps): JSX.Element {
- +
+ {username && ( + + Welcome, {username}! + + )} ) diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx index 61b066bd3a0..4f88ee85fcc 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx @@ -7,7 +7,7 @@ import { makeTestJob } from "utils/fakeJobsUtils" import { Sidebar } from "./Sidebar" import { FakeCordonService } from "../../../services/lookoutV2/mocks/FakeCordonService" import FakeGetJobSpecService from "../../../services/lookoutV2/mocks/FakeGetJobSpecService" -import { FakeGetRunErrorService } from "../../../services/lookoutV2/mocks/FakeGetRunErrorService" +import { FakeGetRunInfoService } from "../../../services/lookoutV2/mocks/FakeGetRunInfoService" import { FakeLogService } from "../../../services/lookoutV2/mocks/FakeLogService" describe("Sidebar", () => { @@ -43,7 +43,7 @@ describe("Sidebar", () => { - + diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx index c76766e7f8c..8e2c7d470b1 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx @@ -22,23 +22,27 @@ import styles from "./SidebarTabJobRuns.module.css" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { getAccessToken, useUserManager } from "../../../oidc" import { ICordonService } from "../../../services/lookoutV2/CordonService" -import { IGetRunErrorService } from "../../../services/lookoutV2/GetRunErrorService" +import { IGetRunInfoService } from "../../../services/lookoutV2/GetRunInfoService" import { getErrorMessage } from "../../../utils" export interface SidebarTabJobRunsProps { job: Job - runErrorService: IGetRunErrorService + runInfoService: IGetRunInfoService cordonService: ICordonService } type LoadState = "Idle" | "Loading" -export const SidebarTabJobRuns = ({ job, runErrorService, cordonService }: SidebarTabJobRunsProps) => { +export const SidebarTabJobRuns = ({ job, runInfoService, cordonService }: SidebarTabJobRunsProps) => { const mounted = useRef(false) const openSnackbar = useCustomSnackbar() const runsNewestFirst = useMemo(() => [...job.runs].reverse(), [job]) const [runErrorMap, setRunErrorMap] = useState>(new Map()) const [runErrorLoadingMap, setRunErrorLoadingMap] = useState>(new Map()) + const [runDebugMessageMap, setRunDebugMessageMap] = useState>(new Map()) + const [runDebugMessageLoadingMap, setRunDebugMessageLoadingMap] = useState>( + new Map(), + ) const [open, setOpen] = useState(false) const fetchRunErrors = useCallback(async () => { @@ -52,7 +56,7 @@ export const SidebarTabJobRuns = ({ job, runErrorService, cordonService }: Sideb for (const run of job.runs) { results.push({ runId: run.runId, - promise: runErrorService.getRunError(run.runId), + promise: runInfoService.getRunError(run.runId), }) } @@ -84,9 +88,56 @@ export const SidebarTabJobRuns = ({ job, runErrorService, cordonService }: Sideb } }, [job]) + const fetchRunDebugMessages = useCallback(async () => { + const newRunDebugMessageLoadingMap = new Map() + for (const run of job.runs) { + newRunDebugMessageLoadingMap.set(run.runId, "Loading") + } + setRunDebugMessageLoadingMap(newRunDebugMessageLoadingMap) + + const results: { runId: string; promise: Promise }[] = [] + for (const run 
of job.runs) { + results.push({ + runId: run.runId, + promise: runInfoService.getRunDebugMessage(run.runId), + }) + } + + const newRunDebugMessageMap = new Map(runErrorMap) + for (const result of results) { + result.promise + .then((debugMessage) => { + if (!mounted.current) { + return + } + newRunDebugMessageMap.set(result.runId, debugMessage) + setRunDebugMessageMap(new Map(newRunDebugMessageMap)) + }) + .catch(async (e) => { + const errMsg = await getErrorMessage(e) + console.error(errMsg) + if (!mounted.current) { + return + } + openSnackbar( + "Failed to retrieve Job Run debug message for Run with ID: " + result.runId + ": " + errMsg, + "error", + ) + }) + .finally(() => { + if (!mounted.current) { + return + } + newRunDebugMessageLoadingMap.set(result.runId, "Idle") + setRunErrorLoadingMap(new Map(newRunDebugMessageLoadingMap)) + }) + } + }, [job]) + useEffect(() => { mounted.current = true fetchRunErrors() + fetchRunDebugMessages() return () => { mounted.current = false } @@ -199,6 +250,19 @@ export const SidebarTabJobRuns = ({ job, runErrorService, cordonService }: Sideb {} )} + {runDebugMessageLoadingMap.has(run.runId) && runDebugMessageLoadingMap.get(run.runId) === "Loading" && ( +
+ +
+ )} + {runDebugMessageMap.has(run.runId) && runDebugMessageMap.get(run.runId) !== "" && ( + + } aria-controls="panel1d-content" id="panel1d-header"> + Debug + + {} + + )} ) })} diff --git a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx index a4f9d4a134b..6f484316451 100644 --- a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx +++ b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx @@ -12,11 +12,11 @@ import { v4 as uuidv4 } from "uuid" import { JobsTableContainer } from "./JobsTableContainer" import { IGetJobSpecService } from "../../services/lookoutV2/GetJobSpecService" -import { IGetRunErrorService } from "../../services/lookoutV2/GetRunErrorService" +import { IGetRunInfoService } from "../../services/lookoutV2/GetRunInfoService" import { ILogService } from "../../services/lookoutV2/LogService" import { FakeCordonService } from "../../services/lookoutV2/mocks/FakeCordonService" import FakeGetJobSpecService from "../../services/lookoutV2/mocks/FakeGetJobSpecService" -import { FakeGetRunErrorService } from "../../services/lookoutV2/mocks/FakeGetRunErrorService" +import { FakeGetRunInfoService } from "../../services/lookoutV2/mocks/FakeGetRunInfoService" import { FakeLogService } from "../../services/lookoutV2/mocks/FakeLogService" // This is quite a heavy component, and tests can timeout on a slower machine @@ -56,7 +56,7 @@ function makeTestJobs( describe("JobsTableContainer", () => { let getJobsService: IGetJobsService, groupJobsService: IGroupJobsService, - runErrorService: IGetRunErrorService, + runErrorService: IGetRunInfoService, jobSpecService: IGetJobSpecService, logService: ILogService, updateJobsService: UpdateJobsService @@ -68,7 +68,7 @@ describe("JobsTableContainer", () => { beforeEach(() => { setUp([]) - runErrorService = new FakeGetRunErrorService(false) + runErrorService = new FakeGetRunInfoService(false) jobSpecService = new FakeGetJobSpecService(false) logService = new FakeLogService() localStorage.clear() @@ -85,7 +85,7 @@ describe("JobsTableContainer", () => { getJobsService={getJobsService} groupJobsService={groupJobsService} updateJobsService={updateJobsService} - runErrorService={runErrorService} + runInfoService={runErrorService} jobSpecService={jobSpecService} logService={logService} cordonService={new FakeCordonService()} diff --git a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx index c984656fe71..86e1c7e21c2 100644 --- a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx +++ b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx @@ -42,7 +42,7 @@ import { isJobGroupRow, JobRow, JobTableRow } from "models/jobsTableModels" import { Job, JobFilter, JobId, Match, SortDirection } from "models/lookoutV2Models" import { useLocation, useNavigate, useParams } from "react-router-dom" import { IGetJobsService } from "services/lookoutV2/GetJobsService" -import { IGetRunErrorService } from "services/lookoutV2/GetRunErrorService" +import { IGetRunInfoService } from "services/lookoutV2/GetRunInfoService" import { IGroupJobsService } from "services/lookoutV2/GroupJobsService" import { JobsTablePreferences, JobsTablePreferencesService } from "services/lookoutV2/JobsTablePreferencesService" import { UpdateJobsService } from "services/lookoutV2/UpdateJobsService" @@ -82,7 +82,7 @@ interface 
JobsTableContainerProps { getJobsService: IGetJobsService groupJobsService: IGroupJobsService updateJobsService: UpdateJobsService - runErrorService: IGetRunErrorService + runInfoService: IGetRunInfoService jobSpecService: IGetJobSpecService logService: ILogService cordonService: ICordonService @@ -127,7 +127,7 @@ export const JobsTableContainer = ({ getJobsService, groupJobsService, updateJobsService, - runErrorService, + runInfoService, jobSpecService, logService, cordonService, @@ -854,7 +854,7 @@ export const JobsTableContainer = ({ {sidebarJobDetails !== undefined && ( = { @@ -50,6 +51,7 @@ export const jobRunStateDisplayInfo: Record -} - -export class GetRunErrorService implements IGetRunErrorService { - async getRunError(runId: string, abortSignal?: AbortSignal): Promise { - const response = await fetch("/api/v1/jobRunError", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - runId, - }), - signal: abortSignal, - }) - - const json = await response.json() - return json.errorString ?? "" - } -} diff --git a/internal/lookout/ui/src/services/lookoutV2/GetRunInfoService.ts b/internal/lookout/ui/src/services/lookoutV2/GetRunInfoService.ts new file mode 100644 index 00000000000..a6f88be2cf8 --- /dev/null +++ b/internal/lookout/ui/src/services/lookoutV2/GetRunInfoService.ts @@ -0,0 +1,33 @@ +export interface IGetRunInfoService { + getRunError(runId: string, abortSignal?: AbortSignal): Promise + getRunDebugMessage(runId: string, abortSignal?: AbortSignal): Promise +} + +export class GetRunInfoService implements IGetRunInfoService { + async getRunError(runId: string, abortSignal?: AbortSignal): Promise { + const response = await fetch("/api/v1/jobRunError", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + runId, + }), + signal: abortSignal, + }) + + const json = await response.json() + return json.errorString ?? "" + } + async getRunDebugMessage(runId: string, abortSignal?: AbortSignal): Promise { + const response = await fetch("/api/v1/jobRunDebugMessage", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + runId, + }), + signal: abortSignal, + }) + + const json = await response.json() + return json.errorString ?? 
"" + } +} diff --git a/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetRunErrorService.ts b/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetRunErrorService.ts deleted file mode 100644 index 638d3cf2d13..00000000000 --- a/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetRunErrorService.ts +++ /dev/null @@ -1,79 +0,0 @@ -import { simulateApiWait } from "../../../utils/fakeJobsUtils" -import { IGetRunErrorService } from "../GetRunErrorService" - -export class FakeGetRunErrorService implements IGetRunErrorService { - constructor(private simulateApiWait = true) {} - - async getRunError(runId: string, signal?: AbortSignal): Promise { - if (this.simulateApiWait) { - await simulateApiWait(signal) - } - if (runId === "doesnotexist") { - throw new Error("Failed to retrieve job run because of reasons") - } - return Promise.resolve( - "javax.servlet.ServletException: Something bad happened\n" + - " at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:60)\n" + - " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + - " at com.example.myproject.ExceptionHandlerFilter.doFilter(ExceptionHandlerFilter.java:28)\n" + - " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + - " at com.example.myproject.OutputBufferFilter.doFilter(OutputBufferFilter.java:33)\n" + - " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + - " at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:388)\n" + - " at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)\n" + - " at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)\n" + - " at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:765)\n" + - " at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:418)\n" + - " at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n" + - " at org.mortbay.jetty.Server.handle(Server.java:326)\n" + - " at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n" + - " at org.mortbay.jetty.HttpConnection$RequestHandler.content(HttpConnection.java:943)\n" + - " at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:756)\n" + - " at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:218)\n" + - " at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n" + - " at org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n" + - " at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n" + - "Caused by: com.example.myproject.MyProjectServletException\n" + - " at com.example.myproject.MyServlet.doPost(MyServlet.java:169)\n" + - " at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)\n" + - " at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)\n" + - " at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)\n" + - " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1166)\n" + - " at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:30)\n" + - " ... 
27 more\n" + - "Caused by: org.hibernate.exception.ConstraintViolationException: could not insert: [com.example.myproject.MyEntity]\n" + - " at org.hibernate.exception.SQLStateConverter.convert(SQLStateConverter.java:96)\n" + - " at org.hibernate.exception.JDBCExceptionHelper.convert(JDBCExceptionHelper.java:66)\n" + - " at org.hibernate.id.insert.AbstractSelectingDelegate.performInsert(AbstractSelectingDelegate.java:64)\n" + - " at org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:2329)\n" + - " at org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:2822)\n" + - " at org.hibernate.action.EntityIdentityInsertAction.execute(EntityIdentityInsertAction.java:71)\n" + - " at org.hibernate.engine.ActionQueue.execute(ActionQueue.java:268)\n" + - " at org.hibernate.event.def.AbstractSaveEventListener.performSaveOrReplicate(AbstractSaveEventListener.java:321)\n" + - " at org.hibernate.event.def.AbstractSaveEventListener.performSave(AbstractSaveEventListener.java:204)\n" + - " at org.hibernate.event.def.AbstractSaveEventListener.saveWithGeneratedId(AbstractSaveEventListener.java:130)\n" + - " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.saveWithGeneratedOrRequestedId(DefaultSaveOrUpdateEventListener.java:210)\n" + - " at org.hibernate.event.def.DefaultSaveEventListener.saveWithGeneratedOrRequestedId(DefaultSaveEventListener.java:56)\n" + - " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.entityIsTransient(DefaultSaveOrUpdateEventListener.java:195)\n" + - " at org.hibernate.event.def.DefaultSaveEventListener.performSaveOrUpdate(DefaultSaveEventListener.java:50)\n" + - " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.onSaveOrUpdate(DefaultSaveOrUpdateEventListener.java:93)\n" + - " at org.hibernate.impl.SessionImpl.fireSave(SessionImpl.java:705)\n" + - " at org.hibernate.impl.SessionImpl.save(SessionImpl.java:693)\n" + - " at org.hibernate.impl.SessionImpl.save(SessionImpl.java:689)\n" + - " at sun.reflect.GeneratedMethodAccessor5.invoke(Unknown Source)\n" + - " at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n" + - " at java.lang.reflect.Method.invoke(Method.java:597)\n" + - " at org.hibernate.context.ThreadLocalSessionContext$TransactionProtectionWrapper.invoke(ThreadLocalSessionContext.java:344)\n" + - " at $Proxy19.save(Unknown Source)\n" + - " at com.example.myproject.MyEntityService.save(MyEntityService.java:59) <-- relevant call (see notes below)\n" + - " at com.example.myproject.MyServlet.doPost(MyServlet.java:164)\n" + - " ... 32 more\n" + - "Caused by: java.sql.SQLException: Violation of unique constraint MY_ENTITY_UK_1: duplicate value(s) for column(s) MY_COLUMN in statement [...]\n" + - " at org.hsqldb.jdbc.Util.throwError(Unknown Source)\n" + - " at org.hsqldb.jdbc.jdbcPreparedStatement.executeUpdate(Unknown Source)\n" + - " at com.mchange.v2.c3p0.impl.NewProxyPreparedStatement.executeUpdate(NewProxyPreparedStatement.java:105)\n" + - " at org.hibernate.id.insert.AbstractSelectingDelegate.performInsert(AbstractSelectingDelegate.java:57)\n" + - " ... 
54 more", - ) - } -} diff --git a/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetRunInfoService.ts b/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetRunInfoService.ts new file mode 100644 index 00000000000..2f37513c896 --- /dev/null +++ b/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetRunInfoService.ts @@ -0,0 +1,152 @@ +import { simulateApiWait } from "../../../utils/fakeJobsUtils" +import { IGetRunInfoService } from "../GetRunInfoService" + +export class FakeGetRunInfoService implements IGetRunInfoService { + constructor(private simulateApiWait = true) {} + + async getRunError(runId: string, signal?: AbortSignal): Promise { + if (this.simulateApiWait) { + await simulateApiWait(signal) + } + if (runId === "doesnotexist") { + throw new Error("Failed to retrieve job run because of reasons") + } + return Promise.resolve( + "javax.servlet.ServletException: Something bad happened\n" + + " at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:60)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + + " at com.example.myproject.ExceptionHandlerFilter.doFilter(ExceptionHandlerFilter.java:28)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + + " at com.example.myproject.OutputBufferFilter.doFilter(OutputBufferFilter.java:33)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + + " at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:388)\n" + + " at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)\n" + + " at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)\n" + + " at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:765)\n" + + " at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:418)\n" + + " at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n" + + " at org.mortbay.jetty.Server.handle(Server.java:326)\n" + + " at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n" + + " at org.mortbay.jetty.HttpConnection$RequestHandler.content(HttpConnection.java:943)\n" + + " at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:756)\n" + + " at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:218)\n" + + " at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n" + + " at org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n" + + " at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n" + + "Caused by: com.example.myproject.MyProjectServletException\n" + + " at com.example.myproject.MyServlet.doPost(MyServlet.java:169)\n" + + " at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)\n" + + " at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)\n" + + " at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1166)\n" + + " at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:30)\n" + + " ... 
27 more\n" + + "Caused by: org.hibernate.exception.ConstraintViolationException: could not insert: [com.example.myproject.MyEntity]\n" + + " at org.hibernate.exception.SQLStateConverter.convert(SQLStateConverter.java:96)\n" + + " at org.hibernate.exception.JDBCExceptionHelper.convert(JDBCExceptionHelper.java:66)\n" + + " at org.hibernate.id.insert.AbstractSelectingDelegate.performInsert(AbstractSelectingDelegate.java:64)\n" + + " at org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:2329)\n" + + " at org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:2822)\n" + + " at org.hibernate.action.EntityIdentityInsertAction.execute(EntityIdentityInsertAction.java:71)\n" + + " at org.hibernate.engine.ActionQueue.execute(ActionQueue.java:268)\n" + + " at org.hibernate.event.def.AbstractSaveEventListener.performSaveOrReplicate(AbstractSaveEventListener.java:321)\n" + + " at org.hibernate.event.def.AbstractSaveEventListener.performSave(AbstractSaveEventListener.java:204)\n" + + " at org.hibernate.event.def.AbstractSaveEventListener.saveWithGeneratedId(AbstractSaveEventListener.java:130)\n" + + " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.saveWithGeneratedOrRequestedId(DefaultSaveOrUpdateEventListener.java:210)\n" + + " at org.hibernate.event.def.DefaultSaveEventListener.saveWithGeneratedOrRequestedId(DefaultSaveEventListener.java:56)\n" + + " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.entityIsTransient(DefaultSaveOrUpdateEventListener.java:195)\n" + + " at org.hibernate.event.def.DefaultSaveEventListener.performSaveOrUpdate(DefaultSaveEventListener.java:50)\n" + + " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.onSaveOrUpdate(DefaultSaveOrUpdateEventListener.java:93)\n" + + " at org.hibernate.impl.SessionImpl.fireSave(SessionImpl.java:705)\n" + + " at org.hibernate.impl.SessionImpl.save(SessionImpl.java:693)\n" + + " at org.hibernate.impl.SessionImpl.save(SessionImpl.java:689)\n" + + " at sun.reflect.GeneratedMethodAccessor5.invoke(Unknown Source)\n" + + " at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n" + + " at java.lang.reflect.Method.invoke(Method.java:597)\n" + + " at org.hibernate.context.ThreadLocalSessionContext$TransactionProtectionWrapper.invoke(ThreadLocalSessionContext.java:344)\n" + + " at $Proxy19.save(Unknown Source)\n" + + " at com.example.myproject.MyEntityService.save(MyEntityService.java:59) <-- relevant call (see notes below)\n" + + " at com.example.myproject.MyServlet.doPost(MyServlet.java:164)\n" + + " ... 32 more\n" + + "Caused by: java.sql.SQLException: Violation of unique constraint MY_ENTITY_UK_1: duplicate value(s) for column(s) MY_COLUMN in statement [...]\n" + + " at org.hsqldb.jdbc.Util.throwError(Unknown Source)\n" + + " at org.hsqldb.jdbc.jdbcPreparedStatement.executeUpdate(Unknown Source)\n" + + " at com.mchange.v2.c3p0.impl.NewProxyPreparedStatement.executeUpdate(NewProxyPreparedStatement.java:105)\n" + + " at org.hibernate.id.insert.AbstractSelectingDelegate.performInsert(AbstractSelectingDelegate.java:57)\n" + + " ... 
54 more", + ) + } + + async getRunDebugMessage(runId: string, signal?: AbortSignal): Promise { + if (this.simulateApiWait) { + await simulateApiWait(signal) + } + if (runId === "doesnotexist") { + throw new Error("Failed to retrieve job run because of reasons") + } + return Promise.resolve( + "javax.servlet.ServletException: Something bad happened\n" + + " at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:60)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + + " at com.example.myproject.ExceptionHandlerFilter.doFilter(ExceptionHandlerFilter.java:28)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + + " at com.example.myproject.OutputBufferFilter.doFilter(OutputBufferFilter.java:33)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)\n" + + " at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:388)\n" + + " at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)\n" + + " at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)\n" + + " at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:765)\n" + + " at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:418)\n" + + " at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n" + + " at org.mortbay.jetty.Server.handle(Server.java:326)\n" + + " at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n" + + " at org.mortbay.jetty.HttpConnection$RequestHandler.content(HttpConnection.java:943)\n" + + " at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:756)\n" + + " at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:218)\n" + + " at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n" + + " at org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n" + + " at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n" + + "Caused by: com.example.myproject.MyProjectServletException\n" + + " at com.example.myproject.MyServlet.doPost(MyServlet.java:169)\n" + + " at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)\n" + + " at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)\n" + + " at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)\n" + + " at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1166)\n" + + " at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:30)\n" + + " ... 
27 more\n" + + "Caused by: org.hibernate.exception.ConstraintViolationException: could not insert: [com.example.myproject.MyEntity]\n" + + " at org.hibernate.exception.SQLStateConverter.convert(SQLStateConverter.java:96)\n" + + " at org.hibernate.exception.JDBCExceptionHelper.convert(JDBCExceptionHelper.java:66)\n" + + " at org.hibernate.id.insert.AbstractSelectingDelegate.performInsert(AbstractSelectingDelegate.java:64)\n" + + " at org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:2329)\n" + + " at org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:2822)\n" + + " at org.hibernate.action.EntityIdentityInsertAction.execute(EntityIdentityInsertAction.java:71)\n" + + " at org.hibernate.engine.ActionQueue.execute(ActionQueue.java:268)\n" + + " at org.hibernate.event.def.AbstractSaveEventListener.performSaveOrReplicate(AbstractSaveEventListener.java:321)\n" + + " at org.hibernate.event.def.AbstractSaveEventListener.performSave(AbstractSaveEventListener.java:204)\n" + + " at org.hibernate.event.def.AbstractSaveEventListener.saveWithGeneratedId(AbstractSaveEventListener.java:130)\n" + + " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.saveWithGeneratedOrRequestedId(DefaultSaveOrUpdateEventListener.java:210)\n" + + " at org.hibernate.event.def.DefaultSaveEventListener.saveWithGeneratedOrRequestedId(DefaultSaveEventListener.java:56)\n" + + " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.entityIsTransient(DefaultSaveOrUpdateEventListener.java:195)\n" + + " at org.hibernate.event.def.DefaultSaveEventListener.performSaveOrUpdate(DefaultSaveEventListener.java:50)\n" + + " at org.hibernate.event.def.DefaultSaveOrUpdateEventListener.onSaveOrUpdate(DefaultSaveOrUpdateEventListener.java:93)\n" + + " at org.hibernate.impl.SessionImpl.fireSave(SessionImpl.java:705)\n" + + " at org.hibernate.impl.SessionImpl.save(SessionImpl.java:693)\n" + + " at org.hibernate.impl.SessionImpl.save(SessionImpl.java:689)\n" + + " at sun.reflect.GeneratedMethodAccessor5.invoke(Unknown Source)\n" + + " at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n" + + " at java.lang.reflect.Method.invoke(Method.java:597)\n" + + " at org.hibernate.context.ThreadLocalSessionContext$TransactionProtectionWrapper.invoke(ThreadLocalSessionContext.java:344)\n" + + " at $Proxy19.save(Unknown Source)\n" + + " at com.example.myproject.MyEntityService.save(MyEntityService.java:59) <-- relevant call (see notes below)\n" + + " at com.example.myproject.MyServlet.doPost(MyServlet.java:164)\n" + + " ... 32 more\n" + + "Caused by: java.sql.SQLException: Violation of unique constraint MY_ENTITY_UK_1: duplicate value(s) for column(s) MY_COLUMN in statement [...]\n" + + " at org.hsqldb.jdbc.Util.throwError(Unknown Source)\n" + + " at org.hsqldb.jdbc.jdbcPreparedStatement.executeUpdate(Unknown Source)\n" + + " at com.mchange.v2.c3p0.impl.NewProxyPreparedStatement.executeUpdate(NewProxyPreparedStatement.java:105)\n" + + " at org.hibernate.id.insert.AbstractSelectingDelegate.performInsert(AbstractSelectingDelegate.java:57)\n" + + " ... 
54 more", + ) + } +} diff --git a/internal/lookout/ui/src/utils/jobsTableColumns.test.tsx b/internal/lookout/ui/src/utils/jobsTableColumns.test.tsx new file mode 100644 index 00000000000..47c526ed221 --- /dev/null +++ b/internal/lookout/ui/src/utils/jobsTableColumns.test.tsx @@ -0,0 +1,28 @@ +import { formatSeconds } from "./jobsTableColumns" + +describe("formatSeconds", () => { + it("should return an empty string when seconds is undefined", () => { + const result = formatSeconds(undefined) + expect(result).toBe("") + }) + + it("should return an empty string when seconds is 0", () => { + const result = formatSeconds(0) + expect(result).toBe("") + }) + + it("should format seconds correctly when less than 60", () => { + const result = formatSeconds(45) + expect(result).toBe("45s") + }) + + it("should format minutes and seconds correctly when less than 3600", () => { + const result = formatSeconds(135) + expect(result).toBe("2m 15s") + }) + + it("should format hours, minutes, and seconds correctly when greater than 3600", () => { + const result = formatSeconds(7265) + expect(result).toBe("2h 1m 5s") + }) +}) diff --git a/internal/lookout/ui/src/utils/jobsTableColumns.tsx b/internal/lookout/ui/src/utils/jobsTableColumns.tsx index 309511fde06..43fb7db693c 100644 --- a/internal/lookout/ui/src/utils/jobsTableColumns.tsx +++ b/internal/lookout/ui/src/utils/jobsTableColumns.tsx @@ -54,6 +54,7 @@ export enum StandardColumnId { Node = "node", Cluster = "cluster", ExitCode = "exitCode", + RuntimeSeconds = "runtimeSeconds", } export const ANNOTATION_COLUMN_PREFIX = "annotation_" @@ -421,8 +422,25 @@ export const JOB_COLUMNS: JobTableColumn[] = [ size: 100, }, }), + accessorColumn({ + id: StandardColumnId.RuntimeSeconds, + accessor: "runtimeSeconds", + displayName: "Runtime", + additionalOptions: { + size: 100, + cell: (cellInfo) => formatSeconds(cellInfo.cell.row.original.runtimeSeconds), + }, + }), ] +export function formatSeconds(seconds: number | undefined): string { + if (seconds === undefined || seconds === 0) return "" + const hours = Math.floor(seconds / 3600) + const minutes = Math.floor((seconds % 3600) / 60) + const remainingSeconds = seconds % 60 + return `${hours ? `${hours}h ` : ""}${hours || minutes ? 
`${minutes}m ` : ""}${remainingSeconds}s`.trim() +} + export const DEFAULT_COLUMNS_TO_DISPLAY: Set = new Set([ StandardColumnId.SelectorCol, StandardColumnId.Queue, diff --git a/internal/lookoutingesterv2/benchmark/benchmark.go b/internal/lookoutingesterv2/benchmark/benchmark.go index 9321de7a551..494bf7578bb 100644 --- a/internal/lookoutingesterv2/benchmark/benchmark.go +++ b/internal/lookoutingesterv2/benchmark/benchmark.go @@ -44,10 +44,9 @@ func withDbBenchmark(b *testing.B, config configuration.LookoutIngesterV2Configu func benchmarkSubmissions1000(b *testing.B, config configuration.LookoutIngesterV2Configuration) { const n = 1000 jobIds := makeUlids(n) - jobsToCreate, userAnnotationsToCreate := createJobInstructions(jobIds, n, 10*n) + jobsToCreate := createJobInstructions(jobIds, n) instructions := &model.InstructionSet{ - JobsToCreate: jobsToCreate, - UserAnnotationsToCreate: userAnnotationsToCreate, + JobsToCreate: jobsToCreate, } withDbBenchmark(b, config, func(b *testing.B, db *pgxpool.Pool) { ldb := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) @@ -63,10 +62,9 @@ func benchmarkSubmissions1000(b *testing.B, config configuration.LookoutIngester func benchmarkSubmissions10000(b *testing.B, config configuration.LookoutIngesterV2Configuration) { const n = 10000 jobIds := makeUlids(n) - jobsToCreate, userAnnotationsToCreate := createJobInstructions(jobIds, n, 10*n) + jobsToCreate := createJobInstructions(jobIds, n) instructions := &model.InstructionSet{ - JobsToCreate: jobsToCreate, - UserAnnotationsToCreate: userAnnotationsToCreate, + JobsToCreate: jobsToCreate, } withDbBenchmark(b, config, func(b *testing.B, db *pgxpool.Pool) { ldb := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) @@ -89,10 +87,9 @@ func benchmarkUpdates1000(b *testing.B, config configuration.LookoutIngesterV2Co jobIds := makeUlids(n) jobRunIds := makeUuids(runsPerJob * n) - jobsToCreate, userAnnotationsToCreate := createJobInstructions(jobIds, n, 10*n) + jobsToCreate := createJobInstructions(jobIds, n) initialInstructions := &model.InstructionSet{ - JobsToCreate: jobsToCreate, - UserAnnotationsToCreate: userAnnotationsToCreate, + JobsToCreate: jobsToCreate, } instructions := &model.InstructionSet{ @@ -126,10 +123,9 @@ func benchmarkUpdates10000(b *testing.B, config configuration.LookoutIngesterV2C jobIds := makeUlids(n) jobRunIds := makeUuids(runsPerJob * n) - jobsToCreate, userAnnotationsToCreate := createJobInstructions(jobIds, n, 10*n) + jobsToCreate := createJobInstructions(jobIds, n) initialInstructions := &model.InstructionSet{ - JobsToCreate: jobsToCreate, - UserAnnotationsToCreate: userAnnotationsToCreate, + JobsToCreate: jobsToCreate, } instructions := &model.InstructionSet{ @@ -169,7 +165,7 @@ func makeUuids(n int) []string { return uuids } -func createJobInstructions(jobIds []string, numJobs int, numUserAnnotations int) ([]*model.CreateJobInstruction, []*model.CreateUserAnnotationInstruction) { +func createJobInstructions(jobIds []string, numJobs int) []*model.CreateJobInstruction { createJobInstructions := make([]*model.CreateJobInstruction, numJobs) jobBytes := make([]byte, 10000, 10000) rand.Read(jobBytes) @@ -194,21 +190,7 @@ func createJobInstructions(jobIds []string, numJobs int, numUserAnnotations int) Annotations: make(map[string]string), } } - createUserAnnotationInstructions := make([]*model.CreateUserAnnotationInstruction, numUserAnnotations) - for i := 0; i < numUserAnnotations; i++ { - job := createJobInstructions[i%len(createJobInstructions)] - k := uuid.NewString() - v := 
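As an aside on the new Runtime column above: `formatSeconds` renders a second count as hours/minutes/seconds, dropping leading zero units and returning an empty string for `0` or `undefined`, which is exactly what the new `jobsTableColumns.test.tsx` cases assert. A rough standalone sketch of the same rule, written in Go rather than TypeScript purely for illustration (this is not code from the patch):

```go
package main

import "fmt"

// formatSeconds mirrors the TypeScript helper added above: empty for 0,
// otherwise "Nh Nm Ns" with leading zero units dropped.
func formatSeconds(seconds int) string {
	if seconds == 0 {
		return ""
	}
	hours := seconds / 3600
	minutes := (seconds % 3600) / 60
	remaining := seconds % 60
	out := ""
	if hours > 0 {
		out += fmt.Sprintf("%dh ", hours)
	}
	if hours > 0 || minutes > 0 {
		out += fmt.Sprintf("%dm ", minutes)
	}
	return out + fmt.Sprintf("%ds", remaining)
}

func main() {
	// Matches the expectations in jobsTableColumns.test.tsx.
	fmt.Println(formatSeconds(45))   // 45s
	fmt.Println(formatSeconds(135))  // 2m 15s
	fmt.Println(formatSeconds(7265)) // 2h 1m 5s
}
```

Running it prints `45s`, `2m 15s` and `2h 1m 5s`, matching the test expectations above.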
uuid.NewString() - job.Annotations[k] = v - createUserAnnotationInstructions[i] = &model.CreateUserAnnotationInstruction{ - JobId: job.JobId, - Key: k, - Value: v, - Queue: job.Queue, - Jobset: job.JobSet, - } - } - return createJobInstructions, createUserAnnotationInstructions + return createJobInstructions } func createJobRunInstructions(n int, runIds []string) []*model.CreateJobRunInstruction { diff --git a/internal/lookoutingesterv2/configuration/types.go b/internal/lookoutingesterv2/configuration/types.go index 126a809faa4..c9540d3b21d 100644 --- a/internal/lookoutingesterv2/configuration/types.go +++ b/internal/lookoutingesterv2/configuration/types.go @@ -31,9 +31,6 @@ type LookoutIngesterV2Configuration struct { // Between each attempt to store data in the database, there is an exponential backoff (starting out as 1s). // MaxBackoff caps this backoff to whatever it is specified (in seconds) MaxBackoff int - // If the ingester should process events using the legacy event conversion logic - // The two schedulers produce slightly different events - so need to be processed differently - UseLegacyEventConversion bool // If non-nil, net/http/pprof endpoints are exposed on localhost on this port. PprofPort *uint16 // List of Regexes which will identify fatal errors when inserting into postgres diff --git a/internal/lookoutingesterv2/ingester.go b/internal/lookoutingesterv2/ingester.go index 5e34ba4e8ae..a2c3d041411 100644 --- a/internal/lookoutingesterv2/ingester.go +++ b/internal/lookoutingesterv2/ingester.go @@ -60,7 +60,7 @@ func Run(config *configuration.LookoutIngesterV2Configuration) { }() } - converter := instructions.NewInstructionConverter(m, config.UserAnnotationPrefix, compressor, config.UseLegacyEventConversion) + converter := instructions.NewInstructionConverter(m, config.UserAnnotationPrefix, compressor) ingester := ingest.NewIngestionPipeline[*model.InstructionSet]( config.Pulsar, diff --git a/internal/lookoutingesterv2/instructions/instructions.go b/internal/lookoutingesterv2/instructions/instructions.go index 34c3d07f8ba..8d6268fd063 100644 --- a/internal/lookoutingesterv2/instructions/instructions.go +++ b/internal/lookoutingesterv2/instructions/instructions.go @@ -1,13 +1,10 @@ package instructions import ( - "fmt" - "sort" "strings" "time" "github.com/gogo/protobuf/proto" - "github.com/google/uuid" "github.com/pkg/errors" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" @@ -41,10 +38,9 @@ type HasNodeName interface { } type InstructionConverter struct { - metrics *metrics.Metrics - userAnnotationPrefix string - compressor compress.Compressor - useLegacyEventConversion bool + metrics *metrics.Metrics + userAnnotationPrefix string + compressor compress.Compressor } type jobResources struct { @@ -54,19 +50,14 @@ type jobResources struct { Gpu int64 } -func NewInstructionConverter(m *metrics.Metrics, userAnnotationPrefix string, compressor compress.Compressor, useLegacyEventConversion bool) *InstructionConverter { +func NewInstructionConverter(m *metrics.Metrics, userAnnotationPrefix string, compressor compress.Compressor) *InstructionConverter { return &InstructionConverter{ - metrics: m, - userAnnotationPrefix: userAnnotationPrefix, - compressor: compressor, - useLegacyEventConversion: useLegacyEventConversion, + metrics: m, + userAnnotationPrefix: userAnnotationPrefix, + compressor: compressor, } } -func (c *InstructionConverter) IsLegacy() bool { - return c.useLegacyEventConversion -} - func (c *InstructionConverter) Convert(ctx *armadacontext.Context, 
sequencesWithIds *ingest.EventSequencesWithIds) *model.InstructionSet { updateInstructions := &model.InstructionSet{ MessageIds: sequencesWithIds.MessageIds, @@ -106,33 +97,28 @@ func (c *InstructionConverter) convertSequence( case *armadaevents.EventSequence_Event_JobErrors: err = c.handleJobErrors(ts, event.GetJobErrors(), update) case *armadaevents.EventSequence_Event_JobRunAssigned: - if c.useLegacyEventConversion { - err = c.handleLegacyJobRunAssigned(ts, event.GetJobRunAssigned(), update) - } else { - err = c.handleJobRunAssigned(ts, event.GetJobRunAssigned(), update) - } + err = c.handleJobRunAssigned(ts, event.GetJobRunAssigned(), update) case *armadaevents.EventSequence_Event_JobRunRunning: err = c.handleJobRunRunning(ts, event.GetJobRunRunning(), update) + case *armadaevents.EventSequence_Event_JobRunCancelled: + err = c.handleJobRunCancelled(ts, event.GetJobRunCancelled(), update) case *armadaevents.EventSequence_Event_JobRunSucceeded: err = c.handleJobRunSucceeded(ts, event.GetJobRunSucceeded(), update) case *armadaevents.EventSequence_Event_JobRunErrors: err = c.handleJobRunErrors(ts, event.GetJobRunErrors(), update) - case *armadaevents.EventSequence_Event_JobDuplicateDetected: - err = c.handleJobDuplicateDetected(ts, event.GetJobDuplicateDetected(), update) case *armadaevents.EventSequence_Event_JobRunPreempted: err = c.handleJobRunPreempted(ts, event.GetJobRunPreempted(), update) case *armadaevents.EventSequence_Event_JobRequeued: err = c.handleJobRequeued(ts, event.GetJobRequeued(), update) case *armadaevents.EventSequence_Event_JobRunLeased: - if !c.useLegacyEventConversion { - err = c.handleJobRunLeased(ts, event.GetJobRunLeased(), update) - } + err = c.handleJobRunLeased(ts, event.GetJobRunLeased(), update) case *armadaevents.EventSequence_Event_ReprioritiseJobSet: case *armadaevents.EventSequence_Event_CancelJob: case *armadaevents.EventSequence_Event_CancelJobSet: case *armadaevents.EventSequence_Event_ResourceUtilisation: case *armadaevents.EventSequence_Event_StandaloneIngressInfo: case *armadaevents.EventSequence_Event_PartitionMarker: + case *armadaevents.EventSequence_Event_JobValidated: log.Debugf("Ignoring event type %T", event.GetEvent()) default: log.Warnf("Ignoring unknown event type %T", event.GetEvent()) @@ -212,9 +198,6 @@ func (c *InstructionConverter) handleSubmitJob( } update.JobsToCreate = append(update.JobsToCreate, &job) - annotationInstructions := createUserAnnotationInstructions(jobId, queue, jobSet, annotations) - update.UserAnnotationsToCreate = append(update.UserAnnotationsToCreate, annotationInstructions...) - return err } @@ -235,31 +218,6 @@ func extractUserAnnotations(userAnnotationPrefix string, jobAnnotations map[stri return result } -func createUserAnnotationInstructions(jobId string, queue string, jobset string, userAnnotations map[string]string) []*model.CreateUserAnnotationInstruction { - // This intermediate variable exists because we want our output to be deterministic - // Iteration over a map in go is non-deterministic, so we read everything into annotations - // and then sort it. 
- instructions := make([]*model.CreateUserAnnotationInstruction, 0, len(userAnnotations)) - for k, v := range userAnnotations { - if k != "" { - instructions = append(instructions, &model.CreateUserAnnotationInstruction{ - JobId: jobId, - Key: k, - Value: v, - Queue: queue, - Jobset: jobset, - }) - } else { - log.WithField("JobId", jobId).Warnf("Ignoring annotation with empty key") - } - } - // sort to make output deterministic - sort.Slice(instructions, func(i, j int) bool { - return instructions[i].Key < instructions[j].Key - }) - return instructions -} - func (c *InstructionConverter) handleReprioritiseJob(ts time.Time, event *armadaevents.ReprioritisedJob, update *model.InstructionSet) error { jobId, err := armadaevents.UlidStringFromProtoUuid(event.GetJobId()) if err != nil { @@ -275,21 +233,6 @@ func (c *InstructionConverter) handleReprioritiseJob(ts time.Time, event *armada return nil } -func (c *InstructionConverter) handleJobDuplicateDetected(ts time.Time, event *armadaevents.JobDuplicateDetected, update *model.InstructionSet) error { - jobId, err := armadaevents.UlidStringFromProtoUuid(event.GetNewJobId()) - if err != nil { - c.metrics.RecordPulsarMessageError(metrics.PulsarMessageErrorProcessing) - return err - } - - jobUpdate := model.UpdateJobInstruction{ - JobId: jobId, - Duplicate: pointer.Bool(true), - } - update.JobsToUpdate = append(update.JobsToUpdate, &jobUpdate) - return nil -} - func (c *InstructionConverter) handleCancelledJob(ts time.Time, event *armadaevents.CancelledJob, update *model.InstructionSet) error { jobId, err := armadaevents.UlidStringFromProtoUuid(event.GetJobId()) if err != nil { @@ -337,24 +280,29 @@ func (c *InstructionConverter) handleJobErrors(ts time.Time, event *armadaevents return err } - isTerminal := false - for _, e := range event.GetErrors() { - if e.Terminal { - isTerminal = true - break + if !e.Terminal { + continue + } + + state := lookout.JobFailedOrdinal + switch e.Reason.(type) { + // We should have a JobPreempted event rather than relying on type of JobErrors + // For now this is how we can identify if the job was preempted or failed + case *armadaevents.Error_JobRunPreemptedError: + state = lookout.JobPreemptedOrdinal } - } - if isTerminal { jobUpdate := model.UpdateJobInstruction{ JobId: jobId, - State: pointer.Int32(int32(lookout.JobFailedOrdinal)), + State: pointer.Int32(int32(state)), LastTransitionTime: &ts, LastTransitionTimeSeconds: pointer.Int64(ts.Unix()), } update.JobsToUpdate = append(update.JobsToUpdate, &jobUpdate) + break } + return nil } @@ -438,8 +386,8 @@ func (c *InstructionConverter) handleJobRunLeased(ts time.Time, event *armadaeve jobRun := model.CreateJobRunInstruction{ RunId: runId, JobId: jobId, - Cluster: event.ExecutorId, - Node: pointer.String(event.NodeId), + Cluster: util.Truncate(event.ExecutorId, maxClusterLen), + Node: pointer.String(util.Truncate(event.NodeId, maxNodeLen)), Leased: &ts, JobRunState: lookout.JobRunLeasedOrdinal, } @@ -479,43 +427,19 @@ func (c *InstructionConverter) handleJobRunAssigned(ts time.Time, event *armadae return nil } -func (c *InstructionConverter) handleLegacyJobRunAssigned(ts time.Time, event *armadaevents.JobRunAssigned, update *model.InstructionSet) error { - jobId, err := armadaevents.UlidStringFromProtoUuid(event.GetJobId()) - if err != nil { - c.metrics.RecordPulsarMessageError(metrics.PulsarMessageErrorProcessing) - return err - } - +func (c *InstructionConverter) handleJobRunCancelled(ts time.Time, event *armadaevents.JobRunCancelled, update *model.InstructionSet) 
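On the `handleJobErrors` rewrite above: the first terminal error now decides the job's final state, with a `JobRunPreemptedError` reason mapping to the preempted ordinal and any other terminal reason mapping to failed. A self-contained sketch of that selection logic, using stand-in types in place of the Armada event and ordinal definitions (the stand-ins only mirror the shape of the real switch; they are not the project's types):

```go
package main

import "fmt"

// Stand-ins for the Armada error reasons; the real types live in pkg/armadaevents.
type reason interface{ isReason() }

type podError struct{}
type jobRunPreemptedError struct{}

func (podError) isReason()             {}
func (jobRunPreemptedError) isReason() {}

type jobError struct {
	Terminal bool
	Reason   reason
}

const (
	jobFailed    = "FAILED"
	jobPreempted = "PREEMPTED"
)

// terminalState mirrors the selection in handleJobErrors: the first terminal
// error decides the job state, and a preemption reason wins over plain failure.
func terminalState(errs []jobError) (string, bool) {
	for _, e := range errs {
		if !e.Terminal {
			continue
		}
		state := jobFailed
		if _, ok := e.Reason.(jobRunPreemptedError); ok {
			state = jobPreempted
		}
		return state, true
	}
	return "", false
}

func main() {
	state, ok := terminalState([]jobError{
		{Terminal: false, Reason: podError{}},
		{Terminal: true, Reason: jobRunPreemptedError{}},
	})
	fmt.Println(state, ok) // PREEMPTED true
}
```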
error { runId, err := armadaevents.UuidStringFromProtoUuid(event.RunId) if err != nil { c.metrics.RecordPulsarMessageError(metrics.PulsarMessageErrorProcessing) - return err - } - - // Update Job - job := model.UpdateJobInstruction{ - JobId: jobId, - State: pointer.Int32(int32(lookout.JobPendingOrdinal)), - LastTransitionTime: &ts, - LastTransitionTimeSeconds: pointer.Int64(ts.Unix()), - LatestRunId: &runId, + return errors.WithStack(err) } - update.JobsToUpdate = append(update.JobsToUpdate, &job) - cluster := "" - if len(event.GetResourceInfos()) > 0 { - cluster = util.Truncate(event.GetResourceInfos()[0].GetObjectMeta().GetExecutorId(), maxClusterLen) - } - // Now create a job run - jobRun := model.CreateJobRunInstruction{ + jobRun := model.UpdateJobRunInstruction{ RunId: runId, - JobId: jobId, - Cluster: cluster, - Leased: &ts, - Pending: &ts, - JobRunState: lookout.JobRunPendingOrdinal, + Finished: &ts, + JobRunState: pointer.Int32(lookout.JobRunCancelledOrdinal), } - update.JobRunsToCreate = append(update.JobRunsToCreate, &jobRun) + update.JobRunsToUpdate = append(update.JobRunsToUpdate, &jobRun) return nil } @@ -550,35 +474,19 @@ func (c *InstructionConverter) handleJobRunErrors(ts time.Time, event *armadaeve } for _, e := range event.GetErrors() { - // Certain legacy events mean we don't have a valid run id - // In this case we have to invent a fake run - // TODO: remove this when the legacy messages go away! - isLegacyEvent := runId == eventutil.LEGACY_RUN_ID - if isLegacyEvent { - jobRun := createFakeJobRun(jobId, ts) - runId = jobRun.RunId - objectMeta := extractMetaFromError(e) - if objectMeta != nil && objectMeta.ExecutorId != "" { - jobRun.Cluster = util.Truncate(objectMeta.ExecutorId, maxClusterLen) - } - update.JobRunsToCreate = append(update.JobRunsToCreate, jobRun) - } - jobRunUpdate := &model.UpdateJobRunInstruction{ RunId: runId, } if e.Terminal { jobRunUpdate.Finished = &ts } - if isLegacyEvent { - jobRunUpdate.Started = &ts - } switch reason := e.Reason.(type) { case *armadaevents.Error_PodError: jobRunUpdate.Node = extractNodeName(reason.PodError) jobRunUpdate.JobRunState = pointer.Int32(lookout.JobRunFailedOrdinal) jobRunUpdate.Error = tryCompressError(jobId, reason.PodError.GetMessage(), c.compressor) + jobRunUpdate.Debug = tryCompressError(jobId, reason.PodError.DebugMessage, c.compressor) var exitCode int32 = 0 for _, containerError := range reason.PodError.ContainerErrors { if containerError.ExitCode != 0 { @@ -589,11 +497,16 @@ func (c *InstructionConverter) handleJobRunErrors(ts time.Time, event *armadaeve jobRunUpdate.ExitCode = pointer.Int32(exitCode) case *armadaevents.Error_PodTerminated: continue + case *armadaevents.Error_JobRunPreemptedError: + // This case is already handled by the JobRunPreempted event + // When we formalise that as a terminal event, we'll remove this JobRunError getting produced + continue case *armadaevents.Error_PodUnschedulable: jobRunUpdate.Node = extractNodeName(reason.PodUnschedulable) case *armadaevents.Error_PodLeaseReturned: jobRunUpdate.JobRunState = pointer.Int32(lookout.JobRunLeaseReturnedOrdinal) jobRunUpdate.Error = tryCompressError(jobId, reason.PodLeaseReturned.GetMessage(), c.compressor) + jobRunUpdate.Debug = tryCompressError(jobId, reason.PodLeaseReturned.GetDebugMessage(), c.compressor) case *armadaevents.Error_LeaseExpired: jobRunUpdate.JobRunState = pointer.Int32(lookout.JobRunLeaseExpiredOrdinal) jobRunUpdate.Error = tryCompressError(jobId, "Lease expired", c.compressor) @@ -621,51 +534,16 @@ func (c 
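The `handleJobRunErrors` changes above start persisting a compressed `Debug` payload next to the existing `Error` one, going through the same `tryCompressError`-style path via the configured compressor. A minimal standalone sketch of that round trip using the standard library's zlib (an assumption for illustration; the project wraps its own compressor/decompressor interfaces rather than calling zlib directly like this):

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

// compress mimics the tryCompressError idea: turn a (possibly long) message
// into a compact []byte suitable for a bytea column.
func compress(msg string) ([]byte, error) {
	var buf bytes.Buffer
	w := zlib.NewWriter(&buf)
	if _, err := w.Write([]byte(msg)); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// decompress is the read-side counterpart, as Lookout does when serving debug messages.
func decompress(data []byte) (string, error) {
	r, err := zlib.NewReader(bytes.NewReader(data))
	if err != nil {
		return "", err
	}
	defer r.Close()
	out, err := io.ReadAll(r)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	debugMsg := "kubelet: back-off restarting failed container" // hypothetical debug text
	compressed, err := compress(debugMsg)
	if err != nil {
		panic(err)
	}
	roundTripped, err := decompress(compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes compressed, round-trips ok: %v\n", len(compressed), roundTripped == debugMsg)
}
```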
*InstructionConverter) handleJobRunPreempted(ts time.Time, event *armada return err } - // Update Job - job := model.UpdateJobInstruction{ - JobId: jobId, - State: pointer.Int32(int32(lookout.JobPreemptedOrdinal)), - LastTransitionTime: &ts, - LastTransitionTimeSeconds: pointer.Int64(ts.Unix()), - LatestRunId: &runId, - } - - update.JobsToUpdate = append(update.JobsToUpdate, &job) - - // Update job run - errorString := "preempted by non armada pod" - preemptiveJobId, err := parseUlidString(event.PreemptiveJobId) - if err != nil { - log.WithError(err).Debug("failed to convert preemptive job id") - } else { - errorString = fmt.Sprintf("preempted by job %s", preemptiveJobId) - } - jobRun := model.UpdateJobRunInstruction{ RunId: runId, JobRunState: pointer.Int32(lookout.JobRunPreemptedOrdinal), Finished: &ts, - Error: tryCompressError(jobId, errorString, c.compressor), + Error: tryCompressError(jobId, "preempted", c.compressor), } update.JobRunsToUpdate = append(update.JobRunsToUpdate, &jobRun) return nil } -func parseUlidString(id *armadaevents.Uuid) (string, error) { - if id == nil { - return "", errors.New("uuid is nil") - } - // Likely wrong if it is zeroed - if id.High64 == 0 && id.Low64 == 0 { - return "", errors.New("") - } - stringId, err := armadaevents.UlidStringFromProtoUuid(id) - if err != nil { - return "", errors.Wrap(err, "could not convert non-nil preemptive job id") - } - return stringId, nil -} - func tryCompressError(jobId string, errorString string, compressor compress.Compressor) []byte { compressedError, err := compressor.Compress([]byte(errorString)) if err != nil { @@ -674,20 +552,6 @@ func tryCompressError(jobId string, errorString string, compressor compress.Comp return compressedError } -func extractMetaFromError(e *armadaevents.Error) *armadaevents.ObjectMeta { - switch err := e.Reason.(type) { - case *armadaevents.Error_PodError: - return err.PodError.ObjectMeta - case *armadaevents.Error_PodTerminated: - return err.PodTerminated.ObjectMeta - case *armadaevents.Error_PodUnschedulable: - return err.PodUnschedulable.ObjectMeta - case *armadaevents.Error_PodLeaseReturned: - return err.PodLeaseReturned.ObjectMeta - } - return nil -} - func getNode(resources []*armadaevents.KubernetesResourceInfo) *string { for _, r := range resources { node := extractNodeName(r.GetPodInfo()) @@ -698,17 +562,6 @@ func getNode(resources []*armadaevents.KubernetesResourceInfo) *string { return pointer.String("UNKNOWN") } -func createFakeJobRun(jobId string, ts time.Time) *model.CreateJobRunInstruction { - runId := uuid.New().String() - return &model.CreateJobRunInstruction{ - RunId: runId, - JobId: jobId, - Cluster: "UNKNOWN", - Pending: &ts, - JobRunState: lookout.JobRunPendingOrdinal, - } -} - func extractNodeName(x HasNodeName) *string { nodeName := x.GetNodeName() if len(nodeName) > 0 { @@ -720,7 +573,7 @@ func extractNodeName(x HasNodeName) *string { func getJobResources(job *api.Job) jobResources { resources := jobResources{} - podSpec := util.PodSpecFromJob(job) + podSpec := job.GetMainPodSpec() for _, container := range podSpec.Containers { resources.Cpu += getResource(container, v1.ResourceCPU, true) @@ -744,7 +597,7 @@ func getResource(container v1.Container, resourceName v1.ResourceName, useMillis } func getJobPriorityClass(job *api.Job) *string { - podSpec := util.PodSpecFromJob(job) + podSpec := job.GetMainPodSpec() if podSpec.PriorityClassName != "" { return pointer.String(podSpec.PriorityClassName) } diff --git a/internal/lookoutingesterv2/instructions/instructions_test.go 
b/internal/lookoutingesterv2/instructions/instructions_test.go index 74b093bfdd7..827e8c997e0 100644 --- a/internal/lookoutingesterv2/instructions/instructions_test.go +++ b/internal/lookoutingesterv2/instructions/instructions_test.go @@ -1,13 +1,11 @@ package instructions import ( - "fmt" "strings" "testing" "github.com/apache/pulsar-client-go/pulsar" "github.com/golang/protobuf/proto" - "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" @@ -22,7 +20,6 @@ import ( "github.com/armadaproject/armada/internal/common/ingest/testfixtures" protoutil "github.com/armadaproject/armada/internal/common/proto" "github.com/armadaproject/armada/internal/common/pulsarutils" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutingesterv2/metrics" "github.com/armadaproject/armada/internal/lookoutingesterv2/model" "github.com/armadaproject/armada/pkg/api" @@ -77,15 +74,6 @@ var expectedPendingRun = model.UpdateJobRunInstruction{ JobRunState: pointer.Int32(lookout.JobRunPendingOrdinal), } -var expectedLegacyPendingRun = model.CreateJobRunInstruction{ - RunId: testfixtures.RunIdString, - JobId: testfixtures.JobIdString, - Cluster: testfixtures.ExecutorId, - Leased: &testfixtures.BaseTime, - Pending: &testfixtures.BaseTime, - JobRunState: lookout.JobRunPendingOrdinal, -} - var expectedRunningRun = model.UpdateJobRunInstruction{ RunId: testfixtures.RunIdString, Node: pointer.String(testfixtures.NodeName), @@ -140,6 +128,7 @@ var expectedFailedRun = model.UpdateJobRunInstruction{ Finished: &testfixtures.BaseTime, JobRunState: pointer.Int32(lookout.JobRunFailedOrdinal), Error: []byte(testfixtures.ErrMsg), + Debug: []byte(testfixtures.DebugMsg), ExitCode: pointer.Int32(testfixtures.ExitCode), } @@ -153,14 +142,19 @@ var expectedPreempted = model.UpdateJobInstruction{ State: pointer.Int32(lookout.JobPreemptedOrdinal), LastTransitionTime: &testfixtures.BaseTime, LastTransitionTimeSeconds: pointer.Int64(testfixtures.BaseTime.Unix()), - LatestRunId: pointer.String(testfixtures.RunIdString), } var expectedPreemptedRun = model.UpdateJobRunInstruction{ RunId: testfixtures.RunIdString, Finished: &testfixtures.BaseTime, JobRunState: pointer.Int32(lookout.JobRunPreemptedOrdinal), - Error: []byte("preempted by non armada pod"), + Error: []byte("preempted"), +} + +var expectedCancelledRun = model.UpdateJobRunInstruction{ + RunId: testfixtures.RunIdString, + Finished: &testfixtures.BaseTime, + JobRunState: pointer.Int32(lookout.JobRunCancelledOrdinal), } func TestConvert(t *testing.T) { @@ -208,49 +202,13 @@ func TestConvert(t *testing.T) { }, } - expectedCreateUserAnnotations := []*model.CreateUserAnnotationInstruction{ - { - JobId: testfixtures.JobIdString, - Key: "a", - Value: "0", - Queue: testfixtures.Queue, - Jobset: testfixtures.JobSetName, - }, - { - JobId: testfixtures.JobIdString, - Key: "b", - Value: "1", - Queue: testfixtures.Queue, - Jobset: testfixtures.JobSetName, - }, - } - - otherJobIdUlid := util.ULID() - otherJobId := util.StringFromUlid(otherJobIdUlid) - otherJobIdProto := armadaevents.ProtoUuidFromUlid(otherJobIdUlid) - - otherRunIdUuid, err := uuid.NewUUID() - assert.NoError(t, err) - otherRunIdProto := armadaevents.ProtoUuidFromUuid(otherRunIdUuid) - - preempted, err := testfixtures.DeepCopy(testfixtures.JobPreempted) - assert.NoError(t, err) - preempted.GetJobRunPreempted().PreemptiveJobId = otherJobIdProto - preempted.GetJobRunPreempted().PreemptiveRunId = otherRunIdProto - - 
preemptedWithPrempteeWithZeroId, err := testfixtures.DeepCopy(testfixtures.JobPreempted) - assert.NoError(t, err) - preemptedWithPrempteeWithZeroId.GetJobRunPreempted().PreemptiveJobId = &armadaevents.Uuid{} - preemptedWithPrempteeWithZeroId.GetJobRunPreempted().PreemptiveRunId = &armadaevents.Uuid{} - cancelledWithReason, err := testfixtures.DeepCopy(testfixtures.JobCancelled) assert.NoError(t, err) cancelledWithReason.GetCancelledJob().Reason = "some reason" tests := map[string]struct { - events *ingest.EventSequencesWithIds - expected *model.InstructionSet - useLegacyEventConversion bool + events *ingest.EventSequencesWithIds + expected *model.InstructionSet }{ "submit": { events: &ingest.EventSequencesWithIds{ @@ -260,11 +218,9 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "happy path single update": { events: &ingest.EventSequencesWithIds{ @@ -279,72 +235,14 @@ func TestConvert(t *testing.T) { MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, - }, - useLegacyEventConversion: false, - }, - "happy path multi update": { - events: &ingest.EventSequencesWithIds{ - EventSequences: []*armadaevents.EventSequence{ - testfixtures.NewEventSequence(submit), - testfixtures.NewEventSequence(testfixtures.Leased), - testfixtures.NewEventSequence(testfixtures.Assigned), - testfixtures.NewEventSequence(testfixtures.Running), - testfixtures.NewEventSequence(testfixtures.JobRunSucceeded), - testfixtures.NewEventSequence(testfixtures.JobSucceeded), - }, - MessageIds: []pulsar.MessageID{ - pulsarutils.NewMessageId(1), - pulsarutils.NewMessageId(2), - pulsarutils.NewMessageId(3), - pulsarutils.NewMessageId(4), - pulsarutils.NewMessageId(5), - }, - }, - expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, - MessageIds: []pulsar.MessageID{ - pulsarutils.NewMessageId(1), - pulsarutils.NewMessageId(2), - pulsarutils.NewMessageId(3), - pulsarutils.NewMessageId(4), - pulsarutils.NewMessageId(5), - }, - }, - useLegacyEventConversion: false, - }, - "happy path single update - legacy": { - events: &ingest.EventSequencesWithIds{ - EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence( - submit, - testfixtures.Leased, - testfixtures.Assigned, - testfixtures.Running, - 
testfixtures.JobRunSucceeded, - testfixtures.JobSucceeded, - )}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, + JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLegacyPendingRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun, &expectedJobRunSucceeded}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, - }, - useLegacyEventConversion: true, }, - "happy path multi update - legacy": { + "happy path multi update": { events: &ingest.EventSequencesWithIds{ EventSequences: []*armadaevents.EventSequence{ testfixtures.NewEventSequence(submit), @@ -363,11 +261,10 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLegacyPendingRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun, &expectedJobRunSucceeded}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, + JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), @@ -376,7 +273,6 @@ func TestConvert(t *testing.T) { pulsarutils.NewMessageId(5), }, }, - useLegacyEventConversion: true, }, "requeued": { events: &ingest.EventSequencesWithIds{ @@ -387,9 +283,8 @@ func TestConvert(t *testing.T) { JobsToUpdate: []*model.UpdateJobInstruction{&expectedJobRequeued}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: false, }, - "cancelled": { + "job cancelled": { events: &ingest.EventSequencesWithIds{ EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobCancelled)}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, @@ -398,9 +293,8 @@ func TestConvert(t *testing.T) { JobsToUpdate: []*model.UpdateJobInstruction{&expectedJobCancelled}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, - "cancelled with reason": { + "job cancelled with reason": { events: &ingest.EventSequencesWithIds{ EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(cancelledWithReason)}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, @@ -416,7 +310,16 @@ func TestConvert(t *testing.T) { }}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - 
useLegacyEventConversion: true, + }, + "job run cancelled": { + events: &ingest.EventSequencesWithIds{ + EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobRunCancelled)}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + }, + expected: &model.InstructionSet{ + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedCancelledRun}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + }, }, "reprioritized": { events: &ingest.EventSequencesWithIds{ @@ -427,7 +330,6 @@ func TestConvert(t *testing.T) { JobsToUpdate: []*model.UpdateJobInstruction{&expectedJobReprioritised}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "job run failed": { events: &ingest.EventSequencesWithIds{ @@ -438,7 +340,6 @@ func TestConvert(t *testing.T) { JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedFailedRun}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "job failed": { events: &ingest.EventSequencesWithIds{ @@ -449,7 +350,6 @@ func TestConvert(t *testing.T) { JobsToUpdate: []*model.UpdateJobInstruction{&expectedFailed}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "terminated": { events: &ingest.EventSequencesWithIds{ @@ -459,7 +359,6 @@ func TestConvert(t *testing.T) { expected: &model.InstructionSet{ MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "unschedulable": { events: &ingest.EventSequencesWithIds{ @@ -470,7 +369,6 @@ func TestConvert(t *testing.T) { JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedUnschedulable}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "duplicate submit is ignored": { events: &ingest.EventSequencesWithIds{ @@ -480,53 +378,26 @@ func TestConvert(t *testing.T) { expected: &model.InstructionSet{ MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, - "preempted": { + "job preempted": { events: &ingest.EventSequencesWithIds{ EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobPreempted)}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - expected: &model.InstructionSet{ - JobsToUpdate: []*model.UpdateJobInstruction{&expectedPreempted}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPreemptedRun}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, - }, - useLegacyEventConversion: true, - }, - "preempted with preemptee": { - events: &ingest.EventSequencesWithIds{ - EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(preempted)}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, - }, expected: &model.InstructionSet{ JobsToUpdate: []*model.UpdateJobInstruction{&expectedPreempted}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{{ - RunId: testfixtures.RunIdString, - Finished: &testfixtures.BaseTime, - JobRunState: pointer.Int32(lookout.JobRunPreemptedOrdinal), - Error: []byte(fmt.Sprintf("preempted by job %s", otherJobId)), - }}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, - "preempted with zeroed preemptee id": { + "job run preempted": { events: &ingest.EventSequencesWithIds{ - EventSequences: 
[]*armadaevents.EventSequence{testfixtures.NewEventSequence(preemptedWithPrempteeWithZeroId)}, + EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobRunPreempted)}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, expected: &model.InstructionSet{ - JobsToUpdate: []*model.UpdateJobInstruction{&expectedPreempted}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{{ - RunId: testfixtures.RunIdString, - Finished: &testfixtures.BaseTime, - JobRunState: pointer.Int32(lookout.JobRunPreemptedOrdinal), - Error: []byte("preempted by non armada pod"), - }}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPreemptedRun}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, - useLegacyEventConversion: true, }, "invalid event without job id or run id": { events: &ingest.EventSequencesWithIds{ @@ -545,14 +416,12 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), }, }, - useLegacyEventConversion: true, }, "invalid event without created time": { events: &ingest.EventSequencesWithIds{ @@ -583,19 +452,17 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - UserAnnotationsToCreate: expectedCreateUserAnnotations, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), }, }, - useLegacyEventConversion: true, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, tc.useLegacyEventConversion) + converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) decompressor := &compress.NoOpDecompressor{} instructionSet := converter.Convert(armadacontext.TODO(), tc.events) require.Equal(t, len(tc.expected.JobsToCreate), len(instructionSet.JobsToCreate)) @@ -622,44 +489,11 @@ func TestConvert(t *testing.T) { assert.Equal(t, tc.expected.JobsToUpdate, instructionSet.JobsToUpdate) assert.Equal(t, tc.expected.JobRunsToCreate, instructionSet.JobRunsToCreate) assert.Equal(t, tc.expected.JobRunsToUpdate, instructionSet.JobRunsToUpdate) - assert.Equal(t, tc.expected.UserAnnotationsToCreate, instructionSet.UserAnnotationsToCreate) assert.Equal(t, tc.expected.MessageIds, instructionSet.MessageIds) }) } } -func TestFailedWithMissingRunId(t *testing.T) { - converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) - instructions := converter.Convert(armadacontext.Background(), &ingest.EventSequencesWithIds{ - EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobLeaseReturned)}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, - }) - jobRun := instructions.JobRunsToCreate[0] - assert.NotEqual(t, eventutil.LEGACY_RUN_ID, jobRun.RunId) - expected := &model.InstructionSet{ - JobRunsToCreate: []*model.CreateJobRunInstruction{ - { - JobId: testfixtures.JobIdString, - RunId: jobRun.RunId, - Cluster: testfixtures.ExecutorId, - Pending: &testfixtures.BaseTime, - 
JobRunState: lookout.JobRunPendingOrdinal, - }, - }, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{ - { - RunId: jobRun.RunId, - Started: &testfixtures.BaseTime, - Finished: &testfixtures.BaseTime, - JobRunState: pointer.Int32(lookout.JobRunLeaseReturnedOrdinal), - Error: []byte(testfixtures.LeaseReturnedMsg), - }, - }, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, - } - assert.Equal(t, expected.JobRunsToUpdate, instructions.JobRunsToUpdate) -} - func TestTruncatesStringsThatAreTooLong(t *testing.T) { longString := strings.Repeat("x", 4000) @@ -667,9 +501,13 @@ func TestTruncatesStringsThatAreTooLong(t *testing.T) { assert.NoError(t, err) submit.GetSubmitJob().GetMainObject().GetPodSpec().GetPodSpec().PriorityClassName = longString + leased, err := testfixtures.DeepCopy(testfixtures.Leased) + assert.NoError(t, err) + leased.GetJobRunLeased().ExecutorId = longString + leased.GetJobRunLeased().NodeId = longString + assigned, err := testfixtures.DeepCopy(testfixtures.Assigned) assert.NoError(t, err) - assigned.GetJobRunAssigned().GetResourceInfos()[0].GetObjectMeta().ExecutorId = longString running, err := testfixtures.DeepCopy(testfixtures.Running) assert.NoError(t, err) @@ -682,6 +520,7 @@ func TestTruncatesStringsThatAreTooLong(t *testing.T) { UserId: longString, Events: []*armadaevents.EventSequence_Event{ submit, + leased, assigned, running, }, @@ -689,7 +528,7 @@ func TestTruncatesStringsThatAreTooLong(t *testing.T) { MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, } - converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) + converter := NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) actual := converter.Convert(armadacontext.TODO(), events) // String lengths obtained from database schema @@ -698,7 +537,8 @@ func TestTruncatesStringsThatAreTooLong(t *testing.T) { assert.Len(t, actual.JobsToCreate[0].JobSet, 1024) assert.Len(t, *actual.JobsToCreate[0].PriorityClass, 63) assert.Len(t, actual.JobRunsToCreate[0].Cluster, 512) - assert.Len(t, *actual.JobRunsToUpdate[0].Node, 512) + assert.Len(t, *actual.JobRunsToCreate[0].Node, 512) + assert.Len(t, *actual.JobRunsToUpdate[1].Node, 512) } func TestExtractNodeName(t *testing.T) { diff --git a/internal/lookoutingesterv2/lookoutdb/insertion.go b/internal/lookoutingesterv2/lookoutdb/insertion.go index 3179ce60c1f..f63378a0ea0 100644 --- a/internal/lookoutingesterv2/lookoutdb/insertion.go +++ b/internal/lookoutingesterv2/lookoutdb/insertion.go @@ -12,7 +12,6 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/armadaerrors" - "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/database/lookout" "github.com/armadaproject/armada/internal/common/ingest/metrics" "github.com/armadaproject/armada/internal/lookoutingesterv2/model" @@ -52,7 +51,7 @@ func (l *LookoutDb) Store(ctx *armadacontext.Context, instructions *model.Instru // Now we can job updates, annotations and new job runs wg := sync.WaitGroup{} - wg.Add(3) + wg.Add(2) go func() { defer wg.Done() l.UpdateJobs(ctx, jobsToUpdate) @@ -61,10 +60,6 @@ func (l *LookoutDb) Store(ctx *armadacontext.Context, instructions *model.Instru defer wg.Done() l.CreateJobRuns(ctx, instructions.JobRunsToCreate) }() - go func() { - defer wg.Done() - l.CreateUserAnnotations(ctx, instructions.UserAnnotationsToCreate) - }() wg.Wait() @@ -77,61 +72,58 @@ func (l 
*LookoutDb) CreateJobs(ctx *armadacontext.Context, instructions []*model if len(instructions) == 0 { return } + start := time.Now() err := l.CreateJobsBatch(ctx, instructions) if err != nil { log.WithError(err).Warn("Creating jobs via batch failed, will attempt to insert serially (this might be slow).") l.CreateJobsScalar(ctx, instructions) } + log.Infof("Inserted %d jobs in %s", len(instructions), time.Since(start)) } func (l *LookoutDb) UpdateJobs(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) { if len(instructions) == 0 { return } + start := time.Now() instructions = l.filterEventsForTerminalJobs(ctx, l.db, instructions, l.metrics) err := l.UpdateJobsBatch(ctx, instructions) if err != nil { log.WithError(err).Warn("Updating jobs via batch failed, will attempt to insert serially (this might be slow).") l.UpdateJobsScalar(ctx, instructions) } + log.Infof("Updated %d jobs in %s", len(instructions), time.Since(start)) } func (l *LookoutDb) CreateJobRuns(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) { if len(instructions) == 0 { return } + start := time.Now() err := l.CreateJobRunsBatch(ctx, instructions) if err != nil { log.WithError(err).Warn("Creating job runs via batch failed, will attempt to insert serially (this might be slow).") l.CreateJobRunsScalar(ctx, instructions) } + log.Infof("Inserted %d job runs in %s", len(instructions), time.Since(start)) } func (l *LookoutDb) UpdateJobRuns(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) { if len(instructions) == 0 { return } + start := time.Now() err := l.UpdateJobRunsBatch(ctx, instructions) if err != nil { log.WithError(err).Warn("Updating job runs via batch failed, will attempt to insert serially (this might be slow).") l.UpdateJobRunsScalar(ctx, instructions) } -} - -func (l *LookoutDb) CreateUserAnnotations(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) { - if len(instructions) == 0 { - return - } - err := l.CreateUserAnnotationsBatch(ctx, instructions) - if err != nil { - log.WithError(err).Warn("Creating user annotations via batch failed, will attempt to insert serially (this might be slow).") - l.CreateUserAnnotationsScalar(ctx, instructions) - } + log.Infof("Updated %d job runs in %s", len(instructions), time.Since(start)) } func (l *LookoutDb) CreateJobsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) error { return l.withDatabaseRetryInsert(func() error { - tmpTable := database.UniqueTableName("job") + tmpTable := "job_create_tmp" createTmp := func(tx pgx.Tx) error { _, err := tx.Exec(ctx, fmt.Sprintf(` @@ -300,7 +292,7 @@ func (l *LookoutDb) CreateJobsScalar(ctx *armadacontext.Context, instructions [] func (l *LookoutDb) UpdateJobsBatch(ctx *armadacontext.Context, instructions []*model.UpdateJobInstruction) error { return l.withDatabaseRetryInsert(func() error { - tmpTable := database.UniqueTableName("job") + tmpTable := "job_update_tmp" createTmp := func(tx pgx.Tx) error { _, err := tx.Exec(ctx, fmt.Sprintf(` @@ -414,11 +406,11 @@ func (l *LookoutDb) UpdateJobsScalar(ctx *armadacontext.Context, instructions [] func (l *LookoutDb) CreateJobRunsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobRunInstruction) error { return l.withDatabaseRetryInsert(func() error { - tmpTable := database.UniqueTableName("job_run") + tmpTable := "job_run_create_tmp" createTmp := func(tx pgx.Tx) error { _, err := tx.Exec(ctx, fmt.Sprintf(` - CREATE TEMPORARY TABLE %s ( + CREATE TEMPORARY 
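The insertion-path changes above add simple timing logs around each phase while keeping the existing strategy of attempting a batch write first and falling back to slower per-row writes on failure. A standalone sketch of that strategy with stand-in store functions (the names below are placeholders, not the repository's API):

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"time"
)

type row struct{ id string }

// storeBatch stands in for a COPY/temp-table batch insert; it fails here just
// to demonstrate the fallback path.
func storeBatch(rows []row) error {
	return errors.New("simulated batch failure")
}

// storeOne stands in for the per-row insert used as a fallback.
func storeOne(r row) error {
	return nil
}

// store mirrors the pattern in CreateJobs/UpdateJobs above: time the phase,
// try the batch path, and fall back to serial inserts if it fails.
func store(rows []row) {
	if len(rows) == 0 {
		return
	}
	start := time.Now()
	if err := storeBatch(rows); err != nil {
		log.Printf("batch insert failed, falling back to serial inserts (this might be slow): %v", err)
		for _, r := range rows {
			if err := storeOne(r); err != nil {
				log.Printf("failed to insert row %s: %v", r.id, err)
			}
		}
	}
	log.Printf("stored %d rows in %s", len(rows), time.Since(start))
}

func main() {
	store([]row{{id: "job-1"}, {id: "job-2"}})
	fmt.Println("done")
}
```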
TABLE %s ( run_id varchar(36), job_id varchar(32), cluster varchar(512), @@ -517,7 +509,7 @@ func (l *LookoutDb) CreateJobRunsScalar(ctx *armadacontext.Context, instructions func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions []*model.UpdateJobRunInstruction) error { return l.withDatabaseRetryInsert(func() error { - tmpTable := database.UniqueTableName("job_run") + tmpTable := "job_run_update_tmp" createTmp := func(tx pgx.Tx) error { _, err := tx.Exec(ctx, fmt.Sprintf(` @@ -529,6 +521,7 @@ func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions finished timestamp, job_run_state smallint, error bytea, + debug bytea, exit_code int ) ON COMMIT DROP;`, tmpTable)) if err != nil { @@ -548,6 +541,7 @@ func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions "finished", "job_run_state", "error", + "debug", "exit_code", }, pgx.CopyFromSlice(len(instructions), func(i int) ([]interface{}, error) { @@ -559,6 +553,7 @@ func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions instructions[i].Finished, instructions[i].JobRunState, instructions[i].Error, + instructions[i].Debug, instructions[i].ExitCode, }, nil }), @@ -577,6 +572,7 @@ func (l *LookoutDb) UpdateJobRunsBatch(ctx *armadacontext.Context, instructions finished = coalesce(tmp.finished, job_run.finished), job_run_state = coalesce(tmp.job_run_state, job_run.job_run_state), error = coalesce(tmp.error, job_run.error), + debug = coalesce(tmp.debug, job_run.debug), exit_code = coalesce(tmp.exit_code, job_run.exit_code) FROM %s as tmp where tmp.run_id = job_run.run_id`, tmpTable), ) @@ -599,7 +595,8 @@ func (l *LookoutDb) UpdateJobRunsScalar(ctx *armadacontext.Context, instructions job_run_state = coalesce($5, job_run_state), error = coalesce($6, error), exit_code = coalesce($7, exit_code), - pending = coalesce($8, pending) + pending = coalesce($8, pending), + debug = coalesce($9, debug) WHERE run_id = $1` for _, i := range instructions { err := l.withDatabaseRetryInsert(func() error { @@ -611,7 +608,8 @@ func (l *LookoutDb) UpdateJobRunsScalar(ctx *armadacontext.Context, instructions i.JobRunState, i.Error, i.ExitCode, - i.Pending) + i.Pending, + i.Debug) if err != nil { l.metrics.RecordDBError(metrics.DBOperationUpdate) } @@ -623,98 +621,6 @@ func (l *LookoutDb) UpdateJobRunsScalar(ctx *armadacontext.Context, instructions } } -func (l *LookoutDb) CreateUserAnnotationsBatch(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) error { - return l.withDatabaseRetryInsert(func() error { - tmpTable := database.UniqueTableName("user_annotation_lookup") - - createTmp := func(tx pgx.Tx) error { - _, err := tx.Exec(ctx, fmt.Sprintf(` - CREATE TEMPORARY TABLE %s ( - job_id varchar(32), - key varchar(1024), - value varchar(1024), - queue varchar(512), - jobset varchar(1024) - ) ON COMMIT DROP;`, tmpTable)) - if err != nil { - l.metrics.RecordDBError(metrics.DBOperationCreateTempTable) - } - return err - } - - insertTmp := func(tx pgx.Tx) error { - _, err := tx.CopyFrom(ctx, - pgx.Identifier{tmpTable}, - []string{ - "job_id", - "key", - "value", - "queue", - "jobset", - }, - pgx.CopyFromSlice(len(instructions), func(i int) ([]interface{}, error) { - return []interface{}{ - instructions[i].JobId, - instructions[i].Key, - instructions[i].Value, - instructions[i].Queue, - instructions[i].Jobset, - }, nil - }), - ) - return err - } - - copyToDest := func(tx pgx.Tx) error { - _, err := tx.Exec( - ctx, - fmt.Sprintf(` - INSERT INTO 
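The batch writers above also switch from per-call unique temporary table names to fixed ones (`job_create_tmp`, `job_run_update_tmp`, and so on), which works because the tables are session-local and created `ON COMMIT DROP` inside each transaction. Sketched below is the three-step shape of that batch upsert, create a temp table, copy rows into it, then merge into the destination, with a simplified column list; the SQL here is illustrative only, not copied from the patch:

```go
package main

import "fmt"

// buildBatchInsertSQL shows the three statements the batch path runs inside one
// transaction: a session-local temp table (dropped on commit), a bulk-load step,
// and a merge into the real table that ignores duplicates.
func buildBatchInsertSQL(tmpTable string) []string {
	return []string{
		fmt.Sprintf(`CREATE TEMPORARY TABLE %s (
    job_id varchar(32),
    queue  varchar(512)
) ON COMMIT DROP;`, tmpTable),
		// In the real code this step is a pgx CopyFrom into tmpTable rather than SQL.
		fmt.Sprintf(`COPY %s (job_id, queue) FROM STDIN;`, tmpTable),
		fmt.Sprintf(`INSERT INTO job (job_id, queue)
SELECT * FROM %s
ON CONFLICT DO NOTHING;`, tmpTable),
	}
}

func main() {
	// Fixed names like "job_create_tmp" are fine here: temporary tables are
	// per-session, and ON COMMIT DROP removes them when the transaction ends.
	for _, stmt := range buildBatchInsertSQL("job_create_tmp") {
		fmt.Println(stmt)
		fmt.Println("--")
	}
}
```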
user_annotation_lookup ( - job_id, - key, - value, - queue, - jobset - ) SELECT * from %s - ON CONFLICT DO NOTHING`, tmpTable)) - if err != nil { - l.metrics.RecordDBError(metrics.DBOperationInsert) - } - return err - } - return batchInsert(ctx, l.db, createTmp, insertTmp, copyToDest) - }) -} - -func (l *LookoutDb) CreateUserAnnotationsScalar(ctx *armadacontext.Context, instructions []*model.CreateUserAnnotationInstruction) { - sqlStatement := `INSERT INTO user_annotation_lookup ( - job_id, - key, - value, - queue, - jobset) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT DO NOTHING` - for _, i := range instructions { - err := l.withDatabaseRetryInsert(func() error { - _, err := l.db.Exec(ctx, sqlStatement, - i.JobId, - i.Key, - i.Value, - i.Queue, - i.Jobset) - if err != nil { - l.metrics.RecordDBError(metrics.DBOperationInsert) - } - return err - }) - // TODO- work out what is a retryable error - if err != nil { - log.WithError(err).Warnf("Create annotation run for job %s, key %s failed", i.JobId, i.Key) - } - } -} - func batchInsert(ctx *armadacontext.Context, db *pgxpool.Pool, createTmp func(pgx.Tx) error, insertTmp func(pgx.Tx) error, copyToDest func(pgx.Tx) error, ) error { @@ -820,6 +726,9 @@ func conflateJobRunUpdates(updates []*model.UpdateJobRunInstruction) []*model.Up if update.Error != nil { existing.Error = update.Error } + if update.Debug != nil { + existing.Debug = update.Debug + } if update.JobRunState != nil { existing.JobRunState = update.JobRunState } @@ -862,7 +771,7 @@ func (l *LookoutDb) filterEventsForTerminalJobs( for i, instruction := range instructions { jobIds[i] = instruction.JobId } - + queryStart := time.Now() rowsRaw, err := l.withDatabaseRetryQuery(func() (interface{}, error) { terminalStates := []int{ lookout.JobSucceededOrdinal, @@ -890,6 +799,7 @@ func (l *LookoutDb) filterEventsForTerminalJobs( terminalJobs[jobId] = int(state) } } + log.Infof("Lookup of terminal states for %d jobs took %s and returned %d results", len(instructions), time.Since(queryStart), len(terminalJobs)) if len(terminalJobs) > 0 { jobInstructionMap := make(map[string]*updateInstructionsForJob) diff --git a/internal/lookoutingesterv2/lookoutdb/insertion_test.go b/internal/lookoutingesterv2/lookoutdb/insertion_test.go index a25d21db294..61cc13aa6f8 100644 --- a/internal/lookoutingesterv2/lookoutdb/insertion_test.go +++ b/internal/lookoutingesterv2/lookoutdb/insertion_test.go @@ -14,6 +14,7 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database/lookout" + "github.com/armadaproject/armada/internal/common/ingest/testfixtures" "github.com/armadaproject/armada/internal/common/pulsarutils" "github.com/armadaproject/armada/internal/lookoutingesterv2/metrics" "github.com/armadaproject/armada/internal/lookoutingesterv2/model" @@ -92,6 +93,7 @@ type JobRunRow struct { Finished *time.Time JobRunState int32 Error []byte + Debug []byte ExitCode *int32 } @@ -126,16 +128,10 @@ func defaultInstructionSet() *model.InstructionSet { Node: pointer.String(nodeName), Started: &startTime, Finished: &finishedTime, + Debug: []byte(testfixtures.DebugMsg), JobRunState: pointer.Int32(lookout.JobRunSucceededOrdinal), ExitCode: pointer.Int32(0), }}, - UserAnnotationsToCreate: []*model.CreateUserAnnotationInstruction{{ - JobId: jobIdString, - Key: "someKey", - Value: "someValue", - Queue: queue, - Jobset: jobSetName, - }}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(3)}, } } @@ -198,14 +194,7 @@ var expectedJobRunAfterUpdate = 
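`conflateJobRunUpdates` above now also carries the new `Debug` payload through when multiple updates for the same run are merged into one. A small standalone sketch of that coalescing rule, latest non-nil value wins per field, keyed by run id, using a stand-in struct rather than the project's model types:

```go
package main

import "fmt"

// runUpdate is a stand-in for UpdateJobRunInstruction with just enough fields
// to show the merge rule.
type runUpdate struct {
	RunID string
	Node  *string
	Error []byte
	Debug []byte
}

// conflate merges updates for the same run id in order, keeping the latest
// non-nil value for each field, mirroring conflateJobRunUpdates above.
func conflate(updates []runUpdate) []runUpdate {
	byRun := map[string]*runUpdate{}
	var order []string
	for _, u := range updates {
		existing, ok := byRun[u.RunID]
		if !ok {
			copied := u
			byRun[u.RunID] = &copied
			order = append(order, u.RunID)
			continue
		}
		if u.Node != nil {
			existing.Node = u.Node
		}
		if u.Error != nil {
			existing.Error = u.Error
		}
		if u.Debug != nil {
			existing.Debug = u.Debug
		}
	}
	out := make([]runUpdate, 0, len(order))
	for _, id := range order {
		out = append(out, *byRun[id])
	}
	return out
}

func main() {
	node := "node-1"
	merged := conflate([]runUpdate{
		{RunID: "run-a", Node: &node},
		{RunID: "run-a", Debug: []byte("kubelet debug output")}, // hypothetical debug text
	})
	fmt.Printf("%d merged update(s); node=%s debug=%q\n", len(merged), *merged[0].Node, merged[0].Debug)
}
```

This keeps a single row update per run when the batch contains several events for it, which is why the new `Debug` field needed its own nil check in the merge.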
JobRunRow{ Finished: &finishedTime, JobRunState: lookout.JobRunSucceededOrdinal, ExitCode: pointer.Int32(0), -} - -var expectedUserAnnotation = UserAnnotationRow{ - JobId: jobIdString, - Key: "someKey", - Value: "someValue", - Queue: queue, - JobSet: jobSetName, + Debug: []byte(testfixtures.DebugMsg), } func TestCreateJobsBatch(t *testing.T) { @@ -604,39 +593,6 @@ func TestUpdateJobRunsScalar(t *testing.T) { assert.NoError(t, err) } -func TestCreateUserAnnotationsBatch(t *testing.T) { - err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - ldb := NewLookoutDb(db, fatalErrors, m, 10) - // Need to make sure we have a job - err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) - assert.Nil(t, err) - - // Insert - err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) - assert.Nil(t, err) - annotation := getUserAnnotationLookup(t, db, jobIdString) - assert.Equal(t, expectedUserAnnotation, annotation) - - // Insert again and test that it's idempotent - err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) - assert.Nil(t, err) - annotation = getUserAnnotationLookup(t, db, jobIdString) - assert.Equal(t, expectedUserAnnotation, annotation) - - // If a row is bad then we should return an error and no updates should happen - _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM user_annotation_lookup") - assert.NoError(t, err) - invalidAnnotation := &model.CreateUserAnnotationInstruction{ - JobId: invalidId, - } - err = ldb.CreateUserAnnotationsBatch(armadacontext.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) - assert.Error(t, err) - assertNoRows(t, ldb.db, "user_annotation_lookup") - return nil - }) - assert.NoError(t, err) -} - func TestStoreWithEmptyInstructionSet(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, fatalErrors, m, 10) @@ -646,38 +602,6 @@ func TestStoreWithEmptyInstructionSet(t *testing.T) { assert.NoError(t, err) assertNoRows(t, ldb.db, "job") assertNoRows(t, ldb.db, "job_run") - assertNoRows(t, ldb.db, "user_annotation_lookup") - return nil - }) - assert.NoError(t, err) -} - -func TestCreateUserAnnotationsScalar(t *testing.T) { - err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - ldb := NewLookoutDb(db, fatalErrors, m, 10) - // Need to make sure we have a job - err := ldb.CreateJobsBatch(armadacontext.Background(), defaultInstructionSet().JobsToCreate) - assert.Nil(t, err) - - // Insert - ldb.CreateUserAnnotationsScalar(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) - annotation := getUserAnnotationLookup(t, db, jobIdString) - assert.Equal(t, expectedUserAnnotation, annotation) - - // Insert again and test that it's idempotent - ldb.CreateUserAnnotationsScalar(armadacontext.Background(), defaultInstructionSet().UserAnnotationsToCreate) - annotation = getUserAnnotationLookup(t, db, jobIdString) - assert.Equal(t, expectedUserAnnotation, annotation) - - // If a row is bad then we should update the rows we can - _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM user_annotation_lookup") - assert.NoError(t, err) - invalidAnnotation := &model.CreateUserAnnotationInstruction{ - JobId: invalidId, - } - ldb.CreateUserAnnotationsScalar(armadacontext.Background(), append(defaultInstructionSet().UserAnnotationsToCreate, invalidAnnotation)) - annotation = 
getUserAnnotationLookup(t, ldb.db, jobIdString) - assert.Equal(t, expectedUserAnnotation, annotation) return nil }) assert.NoError(t, err) @@ -692,11 +616,9 @@ func TestStore(t *testing.T) { job := getJob(t, ldb.db, jobIdString) jobRun := getJobRun(t, ldb.db, runIdString) - annotation := getUserAnnotationLookup(t, ldb.db, jobIdString) assert.Equal(t, expectedJobAfterUpdate, job) assert.Equal(t, expectedJobRunAfterUpdate, jobRun) - assert.Equal(t, expectedUserAnnotation, annotation) return nil }) assert.NoError(t, err) @@ -815,12 +737,12 @@ func TestConflateJobRunUpdates(t *testing.T) { // Non-Empty updates = conflateJobRunUpdates([]*model.UpdateJobRunInstruction{ {RunId: runIdString, Started: &baseTime}, - {RunId: runIdString, Node: pointer.String(nodeName)}, + {RunId: runIdString, Node: pointer.String(nodeName), Debug: []byte("some \000 debug \000")}, {RunId: "someOtherJobRun", Started: &baseTime}, }) expected := []*model.UpdateJobRunInstruction{ - {RunId: runIdString, Started: &baseTime, Node: pointer.String(nodeName)}, + {RunId: runIdString, Started: &baseTime, Node: pointer.String(nodeName), Debug: []byte("some \000 debug \000")}, {RunId: "someOtherJobRun", Started: &baseTime}, } @@ -850,9 +772,11 @@ func TestStoreNullValue(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { jobProto := []byte("hello \000 world \000") errorMsg := []byte("some \000 error \000") + debugMsg := []byte("some \000 debug \000") instructions := defaultInstructionSet() instructions.JobsToCreate[0].JobProto = jobProto instructions.JobRunsToUpdate[0].Error = errorMsg + instructions.JobRunsToUpdate[0].Debug = debugMsg ldb := NewLookoutDb(db, fatalErrors, m, 10) // Do the update @@ -864,6 +788,7 @@ func TestStoreNullValue(t *testing.T) { assert.Equal(t, jobProto, job.JobProto) assert.Equal(t, errorMsg, jobRun.Error) + assert.Equal(t, debugMsg, jobRun.Debug) return nil }) assert.NoError(t, err) @@ -1019,7 +944,8 @@ func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { finished, job_run_state, error, - exit_code + exit_code, + debug FROM job_run WHERE run_id = $1`, runId) err := r.Scan( @@ -1033,22 +959,12 @@ func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { &run.JobRunState, &run.Error, &run.ExitCode, + &run.Debug, ) assert.NoError(t, err) return run } -func getUserAnnotationLookup(t *testing.T, db *pgxpool.Pool, jobId string) UserAnnotationRow { - annotation := UserAnnotationRow{} - r := db.QueryRow( - armadacontext.Background(), - `SELECT job_id, key, value, queue, jobset FROM user_annotation_lookup WHERE job_id = $1`, - jobId) - err := r.Scan(&annotation.JobId, &annotation.Key, &annotation.Value, &annotation.Queue, &annotation.JobSet) - assert.NoError(t, err) - return annotation -} - func assertNoRows(t *testing.T, db *pgxpool.Pool, table string) { t.Helper() var count int diff --git a/internal/lookoutingesterv2/model/model.go b/internal/lookoutingesterv2/model/model.go index f62368974ba..f45494c1af1 100644 --- a/internal/lookoutingesterv2/model/model.go +++ b/internal/lookoutingesterv2/model/model.go @@ -40,15 +40,6 @@ type UpdateJobInstruction struct { LatestRunId *string } -// CreateUserAnnotationInstruction is an instruction to create a new entry in the UserAnnotationInstruction table -type CreateUserAnnotationInstruction struct { - JobId string - Key string - Value string - Queue string - Jobset string -} - // CreateJobRunInstruction is an instruction to update an existing row in the jobRuns table type CreateJobRunInstruction struct { RunId 
string @@ -69,19 +60,19 @@ type UpdateJobRunInstruction struct { Finished *time.Time JobRunState *int32 Error []byte + Debug []byte ExitCode *int32 } // InstructionSet represents a set of instructions to apply to the database. Each type of instruction is stored in its -// own ordered list representign the order it was received. We also store the original message ids corresponding to +// own ordered list representing the order it was received. We also store the original message ids corresponding to // these instructions so that when they are saved to the database, we can ACK the corresponding messages. type InstructionSet struct { - JobsToCreate []*CreateJobInstruction - JobsToUpdate []*UpdateJobInstruction - JobRunsToCreate []*CreateJobRunInstruction - JobRunsToUpdate []*UpdateJobRunInstruction - UserAnnotationsToCreate []*CreateUserAnnotationInstruction - MessageIds []pulsar.MessageID + JobsToCreate []*CreateJobInstruction + JobsToUpdate []*UpdateJobInstruction + JobRunsToCreate []*CreateJobRunInstruction + JobRunsToUpdate []*UpdateJobRunInstruction + MessageIds []pulsar.MessageID } func (i *InstructionSet) GetMessageIDs() []pulsar.MessageID { diff --git a/internal/lookoutv2/application.go b/internal/lookoutv2/application.go index 3b6100292fe..f377701b72e 100644 --- a/internal/lookoutv2/application.go +++ b/internal/lookoutv2/application.go @@ -11,7 +11,7 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database" - "github.com/armadaproject/armada/internal/common/util" + slices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/lookoutv2/configuration" "github.com/armadaproject/armada/internal/lookoutv2/conversions" "github.com/armadaproject/armada/internal/lookoutv2/gen/restapi" @@ -31,12 +31,11 @@ func Serve(configuration configuration.LookoutV2Config) error { return err } - getJobsRepo := repository.NewSqlGetJobsRepository(db, false) - getJobsJsonbRepo := repository.NewSqlGetJobsRepository(db, true) - groupJobsRepo := repository.NewSqlGroupJobsRepository(db, false) - groupJobsJsonbRepo := repository.NewSqlGroupJobsRepository(db, true) + getJobsRepo := repository.NewSqlGetJobsRepository(db) + groupJobsRepo := repository.NewSqlGroupJobsRepository(db) decompressor := compress.NewThreadSafeZlibDecompressor() getJobRunErrorRepo := repository.NewSqlGetJobRunErrorRepository(db, decompressor) + getJobRunDebugMessageRepo := repository.NewSqlGetJobRunDebugMessageRepository(db, decompressor) getJobSpecRepo := repository.NewSqlGetJobSpecRepository(db, decompressor) // create new service API @@ -54,13 +53,9 @@ func Serve(configuration configuration.LookoutV2Config) error { api.GetJobsHandler = operations.GetJobsHandlerFunc( func(params operations.GetJobsParams) middleware.Responder { - filters := util.Map(params.GetJobsRequest.Filters, conversions.FromSwaggerFilter) + filters := slices.Map(params.GetJobsRequest.Filters, conversions.FromSwaggerFilter) order := conversions.FromSwaggerOrder(params.GetJobsRequest.Order) - repo := getJobsRepo - if backend := params.Backend; backend != nil && *backend == "jsonb" { - repo = getJobsJsonbRepo - } - result, err := repo.GetJobs( + result, err := getJobsRepo.GetJobs( armadacontext.New(params.HTTPRequest.Context(), logger), filters, params.GetJobsRequest.ActiveJobSets, @@ -72,20 +67,16 @@ func Serve(configuration configuration.LookoutV2Config) error { return 
operations.NewGetJobsBadRequest().WithPayload(conversions.ToSwaggerError(err.Error())) } return operations.NewGetJobsOK().WithPayload(&operations.GetJobsOKBody{ - Jobs: util.Map(result.Jobs, conversions.ToSwaggerJob), + Jobs: slices.Map(result.Jobs, conversions.ToSwaggerJob), }) }, ) api.GroupJobsHandler = operations.GroupJobsHandlerFunc( func(params operations.GroupJobsParams) middleware.Responder { - filters := util.Map(params.GroupJobsRequest.Filters, conversions.FromSwaggerFilter) + filters := slices.Map(params.GroupJobsRequest.Filters, conversions.FromSwaggerFilter) order := conversions.FromSwaggerOrder(params.GroupJobsRequest.Order) - repo := groupJobsRepo - if backend := params.Backend; backend != nil && *backend == "jsonb" { - repo = groupJobsJsonbRepo - } - result, err := repo.GroupBy( + result, err := groupJobsRepo.GroupBy( armadacontext.New(params.HTTPRequest.Context(), logger), filters, params.GroupJobsRequest.ActiveJobSets, @@ -99,7 +90,7 @@ func Serve(configuration configuration.LookoutV2Config) error { return operations.NewGroupJobsBadRequest().WithPayload(conversions.ToSwaggerError(err.Error())) } return operations.NewGroupJobsOK().WithPayload(&operations.GroupJobsOKBody{ - Groups: util.Map(result.Groups, conversions.ToSwaggerGroup), + Groups: slices.Map(result.Groups, conversions.ToSwaggerGroup), }) }, ) @@ -117,6 +108,19 @@ func Serve(configuration configuration.LookoutV2Config) error { }, ) + api.GetJobRunDebugMessageHandler = operations.GetJobRunDebugMessageHandlerFunc( + func(params operations.GetJobRunDebugMessageParams) middleware.Responder { + ctx := armadacontext.New(params.HTTPRequest.Context(), logger) + result, err := getJobRunDebugMessageRepo.GetJobRunDebugMessage(ctx, params.GetJobRunDebugMessageRequest.RunID) + if err != nil { + return operations.NewGetJobRunDebugMessageBadRequest().WithPayload(conversions.ToSwaggerError(err.Error())) + } + return operations.NewGetJobRunDebugMessageOK().WithPayload(&operations.GetJobRunDebugMessageOKBody{ + ErrorString: result, + }) + }, + ) + api.GetJobSpecHandler = operations.GetJobSpecHandlerFunc( func(params operations.GetJobSpecParams) middleware.Responder { ctx := armadacontext.New(params.HTTPRequest.Context(), logger) diff --git a/internal/lookoutv2/configuration/types.go b/internal/lookoutv2/configuration/types.go index f40a5e2d2e5..a35fa4f5d1e 100644 --- a/internal/lookoutv2/configuration/types.go +++ b/internal/lookoutv2/configuration/types.go @@ -28,9 +28,11 @@ type TlsConfig struct { } type PrunerConfig struct { - ExpireAfter time.Duration - Timeout time.Duration - BatchSize int + ExpireAfter time.Duration + DeduplicationExpireAfter time.Duration + Timeout time.Duration + BatchSize int + Postgres configuration.PostgresConfig } type CommandSpec struct { diff --git a/internal/lookoutv2/conversions/convert.go b/internal/lookoutv2/conversions/convert.go index 92eb0c4f1fb..826aee76ccc 100644 --- a/internal/lookoutv2/conversions/convert.go +++ b/internal/lookoutv2/conversions/convert.go @@ -40,6 +40,7 @@ func ToSwaggerJob(job *model.Job) *models.Job { Node: job.Node, Cluster: job.Cluster, ExitCode: job.ExitCode, + RuntimeSeconds: job.RuntimeSeconds, } } diff --git a/internal/lookoutv2/gen/models/debug_message.go b/internal/lookoutv2/gen/models/debug_message.go new file mode 100644 index 00000000000..eabb315e7ff --- /dev/null +++ b/internal/lookoutv2/gen/models/debug_message.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DebugMessage debug message +// +// swagger:model debugMessage +type DebugMessage struct { + + // debug message + // Required: true + // Min Length: 1 + DebugMessage string `json:"debugMessage"` +} + +// Validate validates this debug message +func (m *DebugMessage) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDebugMessage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DebugMessage) validateDebugMessage(formats strfmt.Registry) error { + + if err := validate.RequiredString("debugMessage", "body", m.DebugMessage); err != nil { + return err + } + + if err := validate.MinLength("debugMessage", "body", m.DebugMessage, 1); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this debug message based on context it is used +func (m *DebugMessage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DebugMessage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DebugMessage) UnmarshalBinary(b []byte) error { + var res DebugMessage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/internal/lookoutv2/gen/models/job.go b/internal/lookoutv2/gen/models/job.go index fbab4765659..bc779d38a89 100644 --- a/internal/lookoutv2/gen/models/job.go +++ b/internal/lookoutv2/gen/models/job.go @@ -21,6 +21,10 @@ import ( // swagger:model job type Job struct { + // runtime seconds + // Required: true + RuntimeSeconds int32 `json:"RuntimeSeconds"` + // annotations // Required: true Annotations map[string]string `json:"annotations"` @@ -32,6 +36,10 @@ type Job struct { // Format: date-time Cancelled *strfmt.DateTime `json:"cancelled,omitempty"` + // cluster + // Required: true + Cluster string `json:"cluster"` + // cpu // Required: true CPU int64 `json:"cpu"` @@ -44,6 +52,9 @@ type Job struct { // Required: true EphemeralStorage int64 `json:"ephemeralStorage"` + // exit code + ExitCode *int32 `json:"exitCode,omitempty"` + // gpu // Required: true Gpu int64 `json:"gpu"` @@ -74,6 +85,9 @@ type Job struct { // namespace Namespace *string `json:"namespace,omitempty"` + // node + Node *string `json:"node,omitempty"` + // owner // Required: true // Min Length: 1 @@ -105,24 +119,16 @@ type Job struct { // Min Length: 1 // Format: date-time Submitted strfmt.DateTime `json:"submitted"` - - // node - // Required: false - Node *string `json:"node,omitempty"` - - // cluster - // Required: true - Cluster string `json:"cluster"` - - //exitCode - // Required: false - ExitCode *int32 `json:"exitCode,omitempty"` } // Validate validates this job func (m *Job) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateRuntimeSeconds(formats); err != nil { + res = append(res, err) + } + if err := m.validateAnnotations(formats); err != nil { res = append(res, err) } @@ -131,6 +137,10 @@ func (m *Job) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateCluster(formats); err != nil { + res = append(res, err) + } + if 
err := m.validateCPU(formats); err != nil { res = append(res, err) } @@ -193,6 +203,15 @@ func (m *Job) Validate(formats strfmt.Registry) error { return nil } +func (m *Job) validateRuntimeSeconds(formats strfmt.Registry) error { + + if err := validate.Required("RuntimeSeconds", "body", int32(m.RuntimeSeconds)); err != nil { + return err + } + + return nil +} + func (m *Job) validateAnnotations(formats strfmt.Registry) error { if err := validate.Required("annotations", "body", m.Annotations); err != nil { @@ -214,6 +233,15 @@ func (m *Job) validateCancelled(formats strfmt.Registry) error { return nil } +func (m *Job) validateCluster(formats strfmt.Registry) error { + + if err := validate.RequiredString("cluster", "body", m.Cluster); err != nil { + return err + } + + return nil +} + func (m *Job) validateCPU(formats strfmt.Registry) error { if err := validate.Required("cpu", "body", int64(m.CPU)); err != nil { diff --git a/internal/lookoutv2/gen/models/run.go b/internal/lookoutv2/gen/models/run.go index c821fb28a5d..c2e46a61319 100644 --- a/internal/lookoutv2/gen/models/run.go +++ b/internal/lookoutv2/gen/models/run.go @@ -34,7 +34,7 @@ type Run struct { // job run state // Required: true - // Enum: [RUN_PENDING RUN_RUNNING RUN_SUCCEEDED RUN_FAILED RUN_TERMINATED RUN_PREEMPTED RUN_UNABLE_TO_SCHEDULE RUN_LEASE_RETURNED RUN_LEASE_EXPIRED RUN_MAX_RUNS_EXCEEDED RUN_LEASED] + // Enum: [RUN_PENDING RUN_RUNNING RUN_SUCCEEDED RUN_FAILED RUN_TERMINATED RUN_PREEMPTED RUN_UNABLE_TO_SCHEDULE RUN_LEASE_RETURNED RUN_LEASE_EXPIRED RUN_MAX_RUNS_EXCEEDED RUN_LEASED RUN_CANCELLED] JobRunState string `json:"jobRunState"` // leased @@ -127,7 +127,7 @@ var runTypeJobRunStatePropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["RUN_PENDING","RUN_RUNNING","RUN_SUCCEEDED","RUN_FAILED","RUN_TERMINATED","RUN_PREEMPTED","RUN_UNABLE_TO_SCHEDULE","RUN_LEASE_RETURNED","RUN_LEASE_EXPIRED","RUN_MAX_RUNS_EXCEEDED","RUN_LEASED"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["RUN_PENDING","RUN_RUNNING","RUN_SUCCEEDED","RUN_FAILED","RUN_TERMINATED","RUN_PREEMPTED","RUN_UNABLE_TO_SCHEDULE","RUN_LEASE_RETURNED","RUN_LEASE_EXPIRED","RUN_MAX_RUNS_EXCEEDED","RUN_LEASED","RUN_CANCELLED"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -169,6 +169,9 @@ const ( // RunJobRunStateRUNLEASED captures enum value "RUN_LEASED" RunJobRunStateRUNLEASED string = "RUN_LEASED" + + // RunJobRunStateRUNCANCELLED captures enum value "RUN_CANCELLED" + RunJobRunStateRUNCANCELLED string = "RUN_CANCELLED" ) // prop value enum diff --git a/internal/lookoutv2/gen/restapi/configure_lookout.go b/internal/lookoutv2/gen/restapi/configure_lookout.go index 4abdaadf90e..733949fde9f 100644 --- a/internal/lookoutv2/gen/restapi/configure_lookout.go +++ b/internal/lookoutv2/gen/restapi/configure_lookout.go @@ -11,9 +11,9 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/runtime" "github.com/go-openapi/runtime/middleware" + "golang.org/x/exp/slices" "github.com/armadaproject/armada/internal/common/serve" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutv2/configuration" "github.com/armadaproject/armada/internal/lookoutv2/gen/restapi/operations" ) @@ -125,7 +125,7 @@ func setCacheControl(fileHandler http.Handler) http.Handler { func allowCORS(handler http.Handler, corsAllowedOrigins []string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if origin := r.Header.Get("Origin"); origin != "" && 
util.ContainsString(corsAllowedOrigins, origin) { + if origin := r.Header.Get("Origin"); origin != "" && slices.Contains(corsAllowedOrigins, origin) { w.Header().Set("Access-Control-Allow-Origin", origin) w.Header().Set("Access-Control-Allow-Credentials", "true") if r.Method == "OPTIONS" && r.Header.Get("Access-Control-Request-Method") != "" { diff --git a/internal/lookoutv2/gen/restapi/embedded_spec.go b/internal/lookoutv2/gen/restapi/embedded_spec.go index eb940a1a35d..5b91df1152d 100644 --- a/internal/lookoutv2/gen/restapi/embedded_spec.go +++ b/internal/lookoutv2/gen/restapi/embedded_spec.go @@ -142,6 +142,63 @@ func init() { } } }, + "/api/v1/jobRunDebugMessage": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "operationId": "getJobRunDebugMessage", + "parameters": [ + { + "name": "getJobRunDebugMessageRequest", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "runId" + ], + "properties": { + "runId": { + "type": "string", + "x-nullable": false + } + } + } + } + ], + "responses": { + "200": { + "description": "Returns debug message for specific job run (if present)", + "schema": { + "type": "object", + "properties": { + "errorString": { + "description": "Debug message for individual job run", + "type": "string", + "x-nullable": false + } + } + } + }, + "400": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + }, + "default": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/api/v1/jobRunError": { "post": { "consumes": [ @@ -456,9 +513,16 @@ func init() { "lastTransitionTime", "duplicate", "annotations", - "runs" + "runs", + "cluster", + "RuntimeSeconds" ], "properties": { + "RuntimeSeconds": { + "type": "integer", + "format": "int32", + "x-nullable": false + }, "annotations": { "type": "object", "additionalProperties": { @@ -475,6 +539,10 @@ func init() { "format": "date-time", "x-nullable": true }, + "cluster": { + "type": "string", + "x-nullable": false + }, "cpu": { "type": "integer", "format": "int64", @@ -489,6 +557,11 @@ func init() { "format": "int64", "x-nullable": false }, + "exitCode": { + "type": "integer", + "format": "int32", + "x-nullable": true + }, "gpu": { "type": "integer", "format": "int64", @@ -523,6 +596,10 @@ func init() { "type": "string", "x-nullable": true }, + "node": { + "type": "string", + "x-nullable": true + }, "owner": { "type": "string", "minLength": 1, @@ -629,7 +706,8 @@ func init() { "RUN_LEASE_RETURNED", "RUN_LEASE_EXPIRED", "RUN_MAX_RUNS_EXCEEDED", - "RUN_LEASED" + "RUN_LEASED", + "RUN_CANCELLED" ], "x-nullable": false }, @@ -805,6 +883,63 @@ func init() { } } }, + "/api/v1/jobRunDebugMessage": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "operationId": "getJobRunDebugMessage", + "parameters": [ + { + "name": "getJobRunDebugMessageRequest", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "runId" + ], + "properties": { + "runId": { + "type": "string", + "x-nullable": false + } + } + } + } + ], + "responses": { + "200": { + "description": "Returns debug message for specific job run (if present)", + "schema": { + "type": "object", + "properties": { + "errorString": { + "description": "Debug message for individual job run", + "type": "string", + "x-nullable": false + } + } + } + }, + "400": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + 
}, + "default": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/api/v1/jobRunError": { "post": { "consumes": [ @@ -1142,9 +1277,16 @@ func init() { "lastTransitionTime", "duplicate", "annotations", - "runs" + "runs", + "cluster", + "RuntimeSeconds" ], "properties": { + "RuntimeSeconds": { + "type": "integer", + "format": "int32", + "x-nullable": false + }, "annotations": { "type": "object", "additionalProperties": { @@ -1161,6 +1303,10 @@ func init() { "format": "date-time", "x-nullable": true }, + "cluster": { + "type": "string", + "x-nullable": false + }, "cpu": { "type": "integer", "format": "int64", @@ -1175,6 +1321,11 @@ func init() { "format": "int64", "x-nullable": false }, + "exitCode": { + "type": "integer", + "format": "int32", + "x-nullable": true + }, "gpu": { "type": "integer", "format": "int64", @@ -1209,6 +1360,10 @@ func init() { "type": "string", "x-nullable": true }, + "node": { + "type": "string", + "x-nullable": true + }, "owner": { "type": "string", "minLength": 1, @@ -1315,7 +1470,8 @@ func init() { "RUN_LEASE_RETURNED", "RUN_LEASE_EXPIRED", "RUN_MAX_RUNS_EXCEEDED", - "RUN_LEASED" + "RUN_LEASED", + "RUN_CANCELLED" ], "x-nullable": false }, diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message.go b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message.go new file mode 100644 index 00000000000..8e886ff6733 --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message.go @@ -0,0 +1,154 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// GetJobRunDebugMessageHandlerFunc turns a function with the right signature into a get job run debug message handler +type GetJobRunDebugMessageHandlerFunc func(GetJobRunDebugMessageParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetJobRunDebugMessageHandlerFunc) Handle(params GetJobRunDebugMessageParams) middleware.Responder { + return fn(params) +} + +// GetJobRunDebugMessageHandler interface for that can handle valid get job run debug message params +type GetJobRunDebugMessageHandler interface { + Handle(GetJobRunDebugMessageParams) middleware.Responder +} + +// NewGetJobRunDebugMessage creates a new http.Handler for the get job run debug message operation +func NewGetJobRunDebugMessage(ctx *middleware.Context, handler GetJobRunDebugMessageHandler) *GetJobRunDebugMessage { + return &GetJobRunDebugMessage{Context: ctx, Handler: handler} +} + +/* + GetJobRunDebugMessage swagger:route POST /api/v1/jobRunDebugMessage getJobRunDebugMessage + +GetJobRunDebugMessage get job run debug message API +*/ +type GetJobRunDebugMessage struct { + Context *middleware.Context + Handler GetJobRunDebugMessageHandler +} + +func (o *GetJobRunDebugMessage) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetJobRunDebugMessageParams() + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) 
// actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// GetJobRunDebugMessageBody get job run debug message body +// +// swagger:model GetJobRunDebugMessageBody +type GetJobRunDebugMessageBody struct { + + // run Id + // Required: true + RunID string `json:"runId"` +} + +// Validate validates this get job run debug message body +func (o *GetJobRunDebugMessageBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateRunID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *GetJobRunDebugMessageBody) validateRunID(formats strfmt.Registry) error { + + if err := validate.RequiredString("getJobRunDebugMessageRequest"+"."+"runId", "body", o.RunID); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this get job run debug message body based on context it is used +func (o *GetJobRunDebugMessageBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetJobRunDebugMessageBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetJobRunDebugMessageBody) UnmarshalBinary(b []byte) error { + var res GetJobRunDebugMessageBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +// GetJobRunDebugMessageOKBody get job run debug message o k body +// +// swagger:model GetJobRunDebugMessageOKBody +type GetJobRunDebugMessageOKBody struct { + + // Debug message for individual job run + ErrorString string `json:"errorString,omitempty"` +} + +// Validate validates this get job run debug message o k body +func (o *GetJobRunDebugMessageOKBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get job run debug message o k body based on context it is used +func (o *GetJobRunDebugMessageOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetJobRunDebugMessageOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetJobRunDebugMessageOKBody) UnmarshalBinary(b []byte) error { + var res GetJobRunDebugMessageOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_parameters.go b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_parameters.go new file mode 100644 index 00000000000..78281bd4bef --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_parameters.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" +) + +// NewGetJobRunDebugMessageParams creates a new GetJobRunDebugMessageParams object +// +// There are no default values defined in the spec. 
+func NewGetJobRunDebugMessageParams() GetJobRunDebugMessageParams { + + return GetJobRunDebugMessageParams{} +} + +// GetJobRunDebugMessageParams contains all the bound params for the get job run debug message operation +// typically these are obtained from a http.Request +// +// swagger:parameters getJobRunDebugMessage +type GetJobRunDebugMessageParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + GetJobRunDebugMessageRequest GetJobRunDebugMessageBody +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetJobRunDebugMessageParams() beforehand. +func (o *GetJobRunDebugMessageParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body GetJobRunDebugMessageBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("getJobRunDebugMessageRequest", "body", "")) + } else { + res = append(res, errors.NewParseError("getJobRunDebugMessageRequest", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(context.Background()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.GetJobRunDebugMessageRequest = body + } + } + } else { + res = append(res, errors.Required("getJobRunDebugMessageRequest", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_responses.go b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_responses.go new file mode 100644 index 00000000000..1e8082c2b77 --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_responses.go @@ -0,0 +1,163 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/armadaproject/armada/internal/lookoutv2/gen/models" +) + +// GetJobRunDebugMessageOKCode is the HTTP code returned for type GetJobRunDebugMessageOK +const GetJobRunDebugMessageOKCode int = 200 + +/* +GetJobRunDebugMessageOK Returns debug message for specific job run (if present) + +swagger:response getJobRunDebugMessageOK +*/ +type GetJobRunDebugMessageOK struct { + + /* + In: Body + */ + Payload *GetJobRunDebugMessageOKBody `json:"body,omitempty"` +} + +// NewGetJobRunDebugMessageOK creates GetJobRunDebugMessageOK with default headers values +func NewGetJobRunDebugMessageOK() *GetJobRunDebugMessageOK { + + return &GetJobRunDebugMessageOK{} +} + +// WithPayload adds the payload to the get job run debug message o k response +func (o *GetJobRunDebugMessageOK) WithPayload(payload *GetJobRunDebugMessageOKBody) *GetJobRunDebugMessageOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get job run debug message o k response +func (o *GetJobRunDebugMessageOK) SetPayload(payload *GetJobRunDebugMessageOKBody) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetJobRunDebugMessageOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetJobRunDebugMessageBadRequestCode is the HTTP code returned for type GetJobRunDebugMessageBadRequest +const GetJobRunDebugMessageBadRequestCode int = 400 + +/* +GetJobRunDebugMessageBadRequest Error response + +swagger:response getJobRunDebugMessageBadRequest +*/ +type GetJobRunDebugMessageBadRequest struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewGetJobRunDebugMessageBadRequest creates GetJobRunDebugMessageBadRequest with default headers values +func NewGetJobRunDebugMessageBadRequest() *GetJobRunDebugMessageBadRequest { + + return &GetJobRunDebugMessageBadRequest{} +} + +// WithPayload adds the payload to the get job run debug message bad request response +func (o *GetJobRunDebugMessageBadRequest) WithPayload(payload *models.Error) *GetJobRunDebugMessageBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get job run debug message bad request response +func (o *GetJobRunDebugMessageBadRequest) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetJobRunDebugMessageBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +/* +GetJobRunDebugMessageDefault Error response + +swagger:response getJobRunDebugMessageDefault +*/ +type GetJobRunDebugMessageDefault struct { + _statusCode int + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewGetJobRunDebugMessageDefault creates GetJobRunDebugMessageDefault with default headers values +func NewGetJobRunDebugMessageDefault(code int) *GetJobRunDebugMessageDefault { + if code <= 0 { + code = 500 + } + + return &GetJobRunDebugMessageDefault{ + _statusCode: code, + } +} + +// WithStatusCode adds the status to the get job run 
debug message default response +func (o *GetJobRunDebugMessageDefault) WithStatusCode(code int) *GetJobRunDebugMessageDefault { + o._statusCode = code + return o +} + +// SetStatusCode sets the status to the get job run debug message default response +func (o *GetJobRunDebugMessageDefault) SetStatusCode(code int) { + o._statusCode = code +} + +// WithPayload adds the payload to the get job run debug message default response +func (o *GetJobRunDebugMessageDefault) WithPayload(payload *models.Error) *GetJobRunDebugMessageDefault { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get job run debug message default response +func (o *GetJobRunDebugMessageDefault) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetJobRunDebugMessageDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(o._statusCode) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_urlbuilder.go b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_urlbuilder.go new file mode 100644 index 00000000000..d7240813282 --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_run_debug_message_urlbuilder.go @@ -0,0 +1,84 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GetJobRunDebugMessageURL generates an URL for the get job run debug message operation +type GetJobRunDebugMessageURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetJobRunDebugMessageURL) WithBasePath(bp string) *GetJobRunDebugMessageURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetJobRunDebugMessageURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetJobRunDebugMessageURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/api/v1/jobRunDebugMessage" + + _basePath := o._basePath + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetJobRunDebugMessageURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetJobRunDebugMessageURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetJobRunDebugMessageURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetJobRunDebugMessageURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetJobRunDebugMessageURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetJobRunDebugMessageURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/internal/lookoutv2/gen/restapi/operations/lookout_api.go b/internal/lookoutv2/gen/restapi/operations/lookout_api.go index ec8176f1104..1791b8e29ff 100644 --- a/internal/lookoutv2/gen/restapi/operations/lookout_api.go +++ b/internal/lookoutv2/gen/restapi/operations/lookout_api.go @@ -46,6 +46,9 @@ func NewLookoutAPI(spec *loads.Document) *LookoutAPI { GetHealthHandler: GetHealthHandlerFunc(func(params GetHealthParams) middleware.Responder { return middleware.NotImplemented("operation GetHealth has not yet been implemented") }), + GetJobRunDebugMessageHandler: GetJobRunDebugMessageHandlerFunc(func(params GetJobRunDebugMessageParams) middleware.Responder { + return middleware.NotImplemented("operation GetJobRunDebugMessage has not yet been implemented") + }), GetJobRunErrorHandler: GetJobRunErrorHandlerFunc(func(params GetJobRunErrorParams) middleware.Responder { return middleware.NotImplemented("operation GetJobRunError has not yet been implemented") }), @@ -99,6 +102,8 @@ type LookoutAPI struct { // GetHealthHandler sets the operation handler for the get health operation GetHealthHandler GetHealthHandler + // GetJobRunDebugMessageHandler sets the operation handler for the get job run debug message operation + GetJobRunDebugMessageHandler GetJobRunDebugMessageHandler // GetJobRunErrorHandler sets the operation handler for the get job run error operation GetJobRunErrorHandler GetJobRunErrorHandler // GetJobSpecHandler sets the operation handler for the get job spec operation @@ -190,6 +195,9 @@ func (o *LookoutAPI) Validate() error { if o.GetHealthHandler == nil { unregistered = append(unregistered, "GetHealthHandler") } + if o.GetJobRunDebugMessageHandler == nil { + unregistered = append(unregistered, "GetJobRunDebugMessageHandler") + } if o.GetJobRunErrorHandler == nil { unregistered = append(unregistered, "GetJobRunErrorHandler") } @@ -299,6 +307,10 @@ func (o *LookoutAPI) initHandlerCache() { if o.handlers["POST"] == nil { 
o.handlers["POST"] = make(map[string]http.Handler) } + o.handlers["POST"]["/api/v1/jobRunDebugMessage"] = NewGetJobRunDebugMessage(o.context, o.GetJobRunDebugMessageHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } o.handlers["POST"]["/api/v1/jobRunError"] = NewGetJobRunError(o.context, o.GetJobRunErrorHandler) if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) diff --git a/internal/lookoutv2/model/model.go b/internal/lookoutv2/model/model.go index 80ebe0df0bf..4a667167027 100644 --- a/internal/lookoutv2/model/model.go +++ b/internal/lookoutv2/model/model.go @@ -43,6 +43,7 @@ type Job struct { Node *string Cluster string ExitCode *int32 + RuntimeSeconds int32 } // PostgreSQLTime is a wrapper around time.Time that converts to UTC when diff --git a/internal/lookoutv2/pruner/pruner.go b/internal/lookoutv2/pruner/pruner.go index 946917fe30a..6ece3dd3962 100644 --- a/internal/lookoutv2/pruner/pruner.go +++ b/internal/lookoutv2/pruner/pruner.go @@ -3,17 +3,50 @@ package pruner import ( "time" + "github.com/hashicorp/go-multierror" "github.com/jackc/pgx/v5" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" ) -func PruneDb(ctx *armadacontext.Context, db *pgx.Conn, keepAfterCompletion time.Duration, batchLimit int, clock clock.Clock) error { +func PruneDb( + ctx *armadacontext.Context, + db *pgx.Conn, + jobLifetime time.Duration, + deduplicationLifetime time.Duration, + batchLimit int, + clock clock.Clock, +) error { + var result *multierror.Error + + if err := deleteJobs(ctx, db, jobLifetime, batchLimit, clock); err != nil { + result = multierror.Append(result, err) + } + + if err := deleteDeduplications(ctx, db, deduplicationLifetime, clock); err != nil { + result = multierror.Append(result, err) + } + + return result.ErrorOrNil() +} + +func deleteDeduplications(ctx *armadacontext.Context, db *pgx.Conn, deduplicationLifetime time.Duration, clock clock.Clock) error { + cutOffTime := clock.Now().Add(-deduplicationLifetime) + log.Infof("Deleting all rows from job_deduplication older than %s", cutOffTime) + cmdTag, err := db.Exec(ctx, "DELETE FROM job_deduplication WHERE inserted <= $1", cutOffTime) + if err != nil { + return errors.Wrap(err, "error deleting deduplications from postgres") + } + log.Infof("Deleted %d rows", cmdTag.RowsAffected()) + return nil +} + +func deleteJobs(ctx *armadacontext.Context, db *pgx.Conn, jobLifetime time.Duration, batchLimit int, clock clock.Clock) error { now := clock.Now() - cutOffTime := now.Add(-keepAfterCompletion) + cutOffTime := now.Add(-jobLifetime) totalJobsToDelete, err := createJobIdsToDeleteTempTable(ctx, db, cutOffTime) if err != nil { return errors.WithStack(err) @@ -66,6 +99,12 @@ func createJobIdsToDeleteTempTable(ctx *armadacontext.Context, db *pgx.Conn, cut CREATE TEMP TABLE job_ids_to_delete AS ( SELECT job_id FROM job WHERE last_transition_time < $1 + AND state in ( + 4, -- Succeeded + 5, -- Failed + 6, -- Cancelled + 7 -- Preempted + ) )`, cutOffTime) if err != nil { return -1, errors.WithStack(err) @@ -94,7 +133,6 @@ func deleteBatch(ctx *armadacontext.Context, tx pgx.Tx, batchLimit int) (int, er _, err = tx.Exec(ctx, ` DELETE FROM job WHERE job_id in (SELECT job_id from batch); DELETE FROM job_run WHERE job_id in (SELECT job_id from batch); - DELETE FROM user_annotation_lookup WHERE job_id in (SELECT job_id from batch); DELETE FROM 
job_ids_to_delete WHERE job_id in (SELECT job_id from batch); TRUNCATE TABLE batch;`) if err != nil { diff --git a/internal/lookoutv2/pruner/pruner_test.go b/internal/lookoutv2/pruner/pruner_test.go index b1717b375f6..2e86dd7d88d 100644 --- a/internal/lookoutv2/pruner/pruner_test.go +++ b/internal/lookoutv2/pruner/pruner_test.go @@ -1,17 +1,19 @@ package pruner import ( + "fmt" "testing" "time" "github.com/google/uuid" "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/database/lookout" + "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutingesterv2/instructions" "github.com/armadaproject/armada/internal/lookoutingesterv2/lookoutdb" @@ -21,12 +23,13 @@ import ( var baseTime, _ = time.Parse("2006-01-02T15:04:05.000Z", "2022-03-01T15:04:05.000Z") -func TestPruneDb(t *testing.T) { - type testJob struct { - jobId string - ts time.Time - } +type testJob struct { + jobId string + ts time.Time + state lookout.JobState +} +func TestPruneDb(t *testing.T) { type testCase struct { testName string expireAfter time.Duration @@ -40,12 +43,13 @@ func TestPruneDb(t *testing.T) { sampleJobIds[i] = util.NewULID() } - manyJobs := func(startIdx, endIdx int, ts time.Time) []testJob { + manyJobs := func(startIdx, endIdx int, state lookout.JobState, ts time.Time) []testJob { var testJobs []testJob for i := startIdx; i < endIdx; i++ { testJobs = append(testJobs, testJob{ jobId: sampleJobIds[i], ts: ts, + state: state, }) } return testJobs @@ -62,16 +66,25 @@ func TestPruneDb(t *testing.T) { testName: "no expired jobs", expireAfter: 10 * time.Hour, jobs: []testJob{ + // Terminated jobs within the expiry { jobId: sampleJobIds[0], ts: baseTime, + state: lookout.JobSucceeded, }, { jobId: sampleJobIds[1], ts: baseTime.Add(-9 * time.Hour), + state: lookout.JobSucceeded, + }, + // Non-terminated job older than the expiry + { + jobId: sampleJobIds[2], + ts: baseTime.Add(-11 * time.Hour), + state: lookout.JobRunning, }, }, - jobIdsLeft: []string{sampleJobIds[0], sampleJobIds[1]}, + jobIdsLeft: []string{sampleJobIds[0], sampleJobIds[1], sampleJobIds[2]}, }, { testName: "expire a job", @@ -79,27 +92,35 @@ func TestPruneDb(t *testing.T) { jobs: []testJob{ { jobId: sampleJobIds[0], - ts: baseTime, + ts: baseTime.Add(-(10*time.Hour + 1*time.Minute)), + state: lookout.JobSucceeded, }, { jobId: sampleJobIds[1], - ts: baseTime.Add(-9 * time.Hour), + ts: baseTime.Add(-(10*time.Hour + 1*time.Minute)), + state: lookout.JobFailed, }, { jobId: sampleJobIds[2], ts: baseTime.Add(-(10*time.Hour + 1*time.Minute)), + state: lookout.JobCancelled, + }, + { + jobId: sampleJobIds[3], + ts: baseTime.Add(-(10*time.Hour + 1*time.Minute)), + state: lookout.JobPreempted, }, }, - jobIdsLeft: []string{sampleJobIds[0], sampleJobIds[1]}, + jobIdsLeft: []string{}, }, { testName: "expire many jobs", expireAfter: 100 * time.Hour, - jobs: util.Concat( - manyJobs(0, 10, baseTime.Add(-300*time.Hour)), - manyJobs(10, 20, baseTime.Add(-200*time.Hour)), - manyJobs(20, 50, baseTime.Add(-(100*time.Hour+5*time.Minute))), - manyJobs(50, 100, baseTime.Add(-99*time.Hour)), + jobs: slices.Concatenate( + manyJobs(0, 10, lookout.JobSucceeded, baseTime.Add(-300*time.Hour)), + manyJobs(10, 20, 
lookout.JobSucceeded, baseTime.Add(-200*time.Hour)), + manyJobs(20, 50, lookout.JobSucceeded, baseTime.Add(-(100*time.Hour+5*time.Minute))), + manyJobs(50, 100, lookout.JobSucceeded, baseTime.Add(-99*time.Hour)), ), jobIdsLeft: sampleJobIds[50:], }, @@ -108,37 +129,23 @@ func TestPruneDb(t *testing.T) { for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - converter := instructions.NewInstructionConverter(metrics.Get(), "armadaproject.io/", &compress.NoOpCompressor{}, true) + converter := instructions.NewInstructionConverter(metrics.Get(), "armadaproject.io/", &compress.NoOpCompressor{}) store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Minute) defer cancel() for _, tj := range tc.jobs { - runId := uuid.NewString() - repository.NewJobSimulator(converter, store). - Submit("queue", "jobSet", "owner", "namespace", tj.ts, &repository.JobOptions{ - JobId: tj.jobId, - Annotations: map[string]string{ - "armadaproject.io/test-1": "one", - "armadaproject.io/test-2": "two", - }, - }). - Pending(runId, "cluster", tj.ts). - Running(runId, "node", tj.ts). - RunSucceeded(runId, tj.ts). - Succeeded(tj.ts). - Build() + storeJob(tj, store, converter) } dbConn, err := db.Acquire(ctx) assert.NoError(t, err) - err = PruneDb(ctx, dbConn.Conn(), tc.expireAfter, 10, clock.NewFakeClock(baseTime)) + err = PruneDb(ctx, dbConn.Conn(), tc.expireAfter, 0, 10, clock.NewFakeClock(baseTime)) assert.NoError(t, err) queriedJobIdsPerTable := []map[string]bool{ selectStringSet(t, db, "SELECT job_id FROM job"), selectStringSet(t, db, "SELECT DISTINCT job_id FROM job_run"), - selectStringSet(t, db, "SELECT DISTINCT job_id FROM user_annotation_lookup"), } for _, queriedJobs := range queriedJobIdsPerTable { assert.Equal(t, len(tc.jobIdsLeft), len(queriedJobs)) @@ -154,6 +161,47 @@ func TestPruneDb(t *testing.T) { } } +func storeJob(job testJob, db *lookoutdb.LookoutDb, converter *instructions.InstructionConverter) { + runId := uuid.NewString() + simulator := repository.NewJobSimulator(converter, db). + Submit("queue", "jobSet", "owner", "namespace", job.ts, &repository.JobOptions{ + JobId: job.jobId, + Annotations: map[string]string{ + "armadaproject.io/test-1": "one", + "armadaproject.io/test-2": "two", + }, + }). + Lease(runId, "cluster", "node", job.ts). + Pending(runId, "cluster", job.ts). + Running(runId, "node", job.ts) + + switch job.state { + case lookout.JobSucceeded: + simulator. + RunSucceeded(runId, job.ts). + Succeeded(job.ts). + Build() + case lookout.JobFailed: + simulator. + RunFailed(runId, "node", 1, "", "", job.ts). + Failed("node", 1, "", job.ts). + Build() + case lookout.JobCancelled: + simulator. + Cancelled(job.ts). + Build() + case lookout.JobPreempted: + simulator. + Preempted(job.ts). + Build() + case lookout.JobRunning: + simulator. 
+ Build() + default: + panic(fmt.Sprintf("job state %s not supported", job.state)) + } +} + func selectStringSet(t *testing.T, db *pgxpool.Pool, query string) map[string]bool { t.Helper() rows, err := db.Query(armadacontext.TODO(), query) diff --git a/internal/lookoutv2/repository/aggregates.go b/internal/lookoutv2/repository/aggregates.go index c2eef639478..e31ac234c7b 100644 --- a/internal/lookoutv2/repository/aggregates.go +++ b/internal/lookoutv2/repository/aggregates.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/database/lookout" + "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutv2/model" ) @@ -90,7 +91,7 @@ func GetStatesForFilter(filters []*model.Filter) []string { stateFilter = f } } - allStates := util.Map(lookout.JobStates, func(jobState lookout.JobState) string { return string(jobState) }) + allStates := slices.Map(lookout.JobStates, func(jobState lookout.JobState) string { return string(jobState) }) if stateFilter == nil { // If no state filter is specified, use all states return allStates diff --git a/internal/lookoutv2/repository/fieldparser.go b/internal/lookoutv2/repository/fieldparser.go index e8ddde0996b..5a61260aed9 100644 --- a/internal/lookoutv2/repository/fieldparser.go +++ b/internal/lookoutv2/repository/fieldparser.go @@ -5,7 +5,7 @@ import ( "math" "time" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/database/lookout" @@ -19,7 +19,7 @@ type FieldParser interface { } type LastTransitionTimeParser struct { - variable pgtype.Numeric + variable pgtype.Float8 } func (fp *LastTransitionTimeParser) GetField() string { @@ -31,12 +31,7 @@ func (fp *LastTransitionTimeParser) GetVariableRef() interface{} { } func (fp *LastTransitionTimeParser) ParseValue() (interface{}, error) { - var dst float64 - err := fp.variable.AssignTo(&dst) - if err != nil { - return "", err - } - t := time.Unix(int64(math.Round(dst)), 0) + t := time.Unix(int64(math.Round(fp.variable.Float64)), 0) return t.Format(time.RFC3339), nil } diff --git a/internal/lookoutv2/repository/frombuilder.go b/internal/lookoutv2/repository/frombuilder.go deleted file mode 100644 index b291ad226eb..00000000000 --- a/internal/lookoutv2/repository/frombuilder.go +++ /dev/null @@ -1,68 +0,0 @@ -package repository - -import ( - "fmt" - "strings" -) - -type JoinType string - -const ( - Left JoinType = "LEFT" - Inner JoinType = "INNER" -) - -type FromBuilder struct { - baseTable string - baseTableAbbrev string - joins []*join -} - -type join struct { - joinType JoinType - table string - abbreviation string - on []string -} - -func NewFromBuilder(baseTable, baseTableAbbrev string) *FromBuilder { - return &FromBuilder{ - baseTable: baseTable, - baseTableAbbrev: baseTableAbbrev, - joins: nil, - } -} - -// Join specifies JOIN with other table -// Include multiple values in the on list to join by multiple columns -// Note: the columns you join on need to have the same names in both tables -func (b *FromBuilder) Join(joinType JoinType, table string, abbrev string, on []string) *FromBuilder { - b.joins = append(b.joins, &join{ - joinType: joinType, - table: table, - abbreviation: abbrev, - on: on, - }) - return b -} - -func (b *FromBuilder) Build() string { - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("FROM %s AS %s", b.baseTable, b.baseTableAbbrev)) - for _, join := 
range b.joins {
-		joinConditions := make([]string, len(join.on))
-		for i, col := range join.on {
-			joinConditions[i] = fmt.Sprintf("%[1]s.%[2]s = %[3]s.%[2]s",
-				b.baseTableAbbrev,
-				col,
-				join.abbreviation)
-		}
-		fullJoinCondition := strings.Join(joinConditions, " AND ")
-		sb.WriteString(fmt.Sprintf(" %[1]s JOIN %[2]s AS %[3]s ON %[4]s",
-			join.joinType,
-			join.table,
-			join.abbreviation,
-			fullJoinCondition))
-	}
-	return sb.String()
-}
diff --git a/internal/lookoutv2/repository/frombuilder_test.go b/internal/lookoutv2/repository/frombuilder_test.go
deleted file mode 100644
index d03a9f5dbae..00000000000
--- a/internal/lookoutv2/repository/frombuilder_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package repository
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestFromBuilder_SingleTable(t *testing.T) {
-	out := NewFromBuilder("job", "j").
-		Build()
-	assert.Equal(t, splitByWhitespace("FROM job AS j"), splitByWhitespace(out))
-}
-
-func TestFromBuilder_ManyTables(t *testing.T) {
-	out := NewFromBuilder("job", "j").
-		Join(Left, "job_run", "jr", []string{"job_id"}).
-		Join(Inner, "( SELECT * FROM user_annotation_lookup WHERE key = AND value = )", "ct", []string{"job_id"}).
-		Join(Inner, "other_table", "ot", []string{"other_column"}).
-		Join(Inner, "yet_another_table", "yot", []string{"col_a", "col_b"}).
-		Build()
-	assert.Equal(t, splitByWhitespace(`
-		FROM job AS j
-		LEFT JOIN job_run AS jr ON j.job_id = jr.job_id
-		INNER JOIN (
-			SELECT * FROM user_annotation_lookup
-			WHERE key = AND value =
-		) AS ct ON j.job_id = ct.job_id
-		INNER JOIN other_table AS ot ON j.other_column = ot.other_column
-		INNER JOIN yet_another_table AS yot ON j.col_a = yot.col_a AND j.col_b = yot.col_b
-	`), splitByWhitespace(out))
-}
diff --git a/internal/lookoutv2/repository/getjobrundebugmessage.go b/internal/lookoutv2/repository/getjobrundebugmessage.go
new file mode 100644
index 00000000000..5b4841dc7c7
--- /dev/null
+++ b/internal/lookoutv2/repository/getjobrundebugmessage.go
@@ -0,0 +1,44 @@
+package repository
+
+import (
+	"github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgxpool"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/armadaproject/armada/internal/common/armadacontext"
+	"github.com/armadaproject/armada/internal/common/compress"
+)
+
+type GetJobRunDebugMessageRepository interface {
+	GetJobRunDebugMessage(ctx *armadacontext.Context, runId string) (string, error)
+}
+
+type SqlGetJobRunDebugMessageRepository struct {
+	db           *pgxpool.Pool
+	decompressor compress.Decompressor
+}
+
+func NewSqlGetJobRunDebugMessageRepository(db *pgxpool.Pool, decompressor compress.Decompressor) *SqlGetJobRunDebugMessageRepository {
+	return &SqlGetJobRunDebugMessageRepository{
+		db:           db,
+		decompressor: decompressor,
+	}
+}
+
+func (r *SqlGetJobRunDebugMessageRepository) GetJobRunDebugMessage(ctx *armadacontext.Context, runId string) (string, error) {
+	var rawBytes []byte
+	err := r.db.QueryRow(ctx, "SELECT debug FROM job_run WHERE run_id = $1 AND error IS NOT NULL", runId).Scan(&rawBytes)
+	if err != nil {
+		if err == pgx.ErrNoRows {
+			return "", errors.Errorf("no debug message found for run with id %s", runId)
+		}
+		return "", err
+	}
+	decompressed, err := r.decompressor.Decompress(rawBytes)
+	if err != nil {
+		log.WithError(err).Error("failed to decompress")
+		return "", err
+	}
+	return string(decompressed), nil
+}
diff --git a/internal/lookoutv2/repository/getjobrundebugmessage_test.go b/internal/lookoutv2/repository/getjobrundebugmessage_test.go
new file
mode 100644 index 00000000000..6c54671a716 --- /dev/null +++ b/internal/lookoutv2/repository/getjobrundebugmessage_test.go @@ -0,0 +1,55 @@ +package repository + +import ( + "testing" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/compress" + "github.com/armadaproject/armada/internal/common/database/lookout" + "github.com/armadaproject/armada/internal/lookoutingesterv2/instructions" + "github.com/armadaproject/armada/internal/lookoutingesterv2/lookoutdb" + "github.com/armadaproject/armada/internal/lookoutingesterv2/metrics" +) + +func TestGetJobRunDebugMessage(t *testing.T) { + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) + store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) + + debugMessageStrings := []string{ + "some bad error happened!", + "", + } + for _, expected := range debugMessageStrings { + _ = NewJobSimulator(converter, store). + Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Lease(runId, cluster, node, baseTime). + Pending(runId, cluster, baseTime). + Running(runId, node, baseTime). + RunFailed(runId, node, 137, "", expected, baseTime). + Failed(node, 137, "", baseTime). + Build(). + ApiJob() + + repo := NewSqlGetJobRunDebugMessageRepository(db, &compress.NoOpDecompressor{}) + result, err := repo.GetJobRunDebugMessage(armadacontext.TODO(), runId) + assert.NoError(t, err) + assert.Equal(t, expected, result) + } + return nil + }) + assert.NoError(t, err) +} + +func TestGetJobRunDebugMessageNotFound(t *testing.T) { + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + repo := NewSqlGetJobRunDebugMessageRepository(db, &compress.NoOpDecompressor{}) + _, err := repo.GetJobRunDebugMessage(armadacontext.TODO(), runId) + assert.Error(t, err) + return nil + }) + assert.NoError(t, err) +} diff --git a/internal/lookoutv2/repository/getjobrunerror_test.go b/internal/lookoutv2/repository/getjobrunerror_test.go index abf74075822..f2eeb99f13f 100644 --- a/internal/lookoutv2/repository/getjobrunerror_test.go +++ b/internal/lookoutv2/repository/getjobrunerror_test.go @@ -16,7 +16,7 @@ import ( func TestGetJobRunError(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) + converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) errorStrings := []string{ @@ -26,9 +26,10 @@ func TestGetJobRunError(t *testing.T) { for _, expected := range errorStrings { _ = NewJobSimulator(converter, store). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Lease(runId, cluster, node, baseTime). Pending(runId, cluster, baseTime). Running(runId, node, baseTime). - RunFailed(runId, node, 137, expected, baseTime). + RunFailed(runId, node, 137, expected, "", baseTime). Failed(node, 137, "", baseTime). Build(). 
ApiJob() diff --git a/internal/lookoutv2/repository/getjobs.go b/internal/lookoutv2/repository/getjobs.go index 86217db0827..42854ee4f52 100644 --- a/internal/lookoutv2/repository/getjobs.go +++ b/internal/lookoutv2/repository/getjobs.go @@ -3,14 +3,11 @@ package repository import ( "database/sql" "encoding/json" - "fmt" - "sort" "time" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" @@ -23,9 +20,9 @@ type GetJobsRepository interface { } type SqlGetJobsRepository struct { - db *pgxpool.Pool - lookoutTables *LookoutTables - useJsonbBackend bool + db *pgxpool.Pool + lookoutTables *LookoutTables + clock clock.Clock } type GetJobsResult struct { @@ -53,100 +50,20 @@ type jobRow struct { cancelReason sql.NullString } -type runRow struct { - jobId string - runId string - cluster string - node sql.NullString - leased sql.NullTime - pending sql.NullTime - started sql.NullTime - finished sql.NullTime - jobRunState int - exitCode sql.NullInt32 -} - -type annotationRow struct { - jobId string - annotationKey string - annotationValue string -} - -func NewSqlGetJobsRepository(db *pgxpool.Pool, useJsonbBackend bool) *SqlGetJobsRepository { +func NewSqlGetJobsRepository(db *pgxpool.Pool) *SqlGetJobsRepository { return &SqlGetJobsRepository{ - db: db, - lookoutTables: NewTables(), - useJsonbBackend: useJsonbBackend, + db: db, + lookoutTables: NewTables(), + clock: clock.RealClock{}, } } func (r *SqlGetJobsRepository) GetJobs(ctx *armadacontext.Context, filters []*model.Filter, activeJobSets bool, order *model.Order, skip int, take int) (*GetJobsResult, error) { - getJobs := r.getJobs - if r.useJsonbBackend { - getJobs = r.getJobsJsonb - } - return getJobs(ctx, filters, activeJobSets, order, skip, take) + return r.getJobs(ctx, filters, activeJobSets, order, skip, take) } func (r *SqlGetJobsRepository) getJobs(ctx *armadacontext.Context, filters []*model.Filter, activeJobSets bool, order *model.Order, skip int, take int) (*GetJobsResult, error) { - var jobRows []*jobRow - var runRows []*runRow - var annotationRows []*annotationRow - - err := pgx.BeginTxFunc(ctx, r.db, pgx.TxOptions{ - IsoLevel: pgx.RepeatableRead, - AccessMode: pgx.ReadWrite, - DeferrableMode: pgx.Deferrable, - }, func(tx pgx.Tx) error { - createTempTableQuery, tempTableName := NewQueryBuilder(r.lookoutTables).CreateTempTable() - logQuery(createTempTableQuery, "CreateTempTable") - _, err := tx.Exec(ctx, createTempTableQuery.Sql, createTempTableQuery.Args...) - if err != nil { - return err - } - - insertQuery, err := NewQueryBuilder(r.lookoutTables).InsertIntoTempTable(tempTableName, filters, activeJobSets, order, skip, take) - if err != nil { - return err - } - logQuery(insertQuery, "InsertIntoTempTable") - _, err = tx.Exec(ctx, insertQuery.Sql, insertQuery.Args...) 
- if err != nil { - return err - } - - jobRows, err = makeJobRows(ctx, tx, tempTableName) - if err != nil { - log.WithError(err).Error("failed getting job rows") - return err - } - runRows, err = makeRunRows(ctx, tx, tempTableName) - if err != nil { - log.WithError(err).Error("failed getting run rows") - return err - } - - annotationRows, err = makeAnnotationRows(ctx, tx, tempTableName) - if err != nil { - log.WithError(err).Error("failed getting annotation rows") - return err - } - return nil - }) - if err != nil { - return nil, err - } - jobs, err := rowsToJobs(jobRows, runRows, annotationRows) - if err != nil { - return nil, err - } - return &GetJobsResult{ - Jobs: jobs, - }, nil -} - -func (r *SqlGetJobsRepository) getJobsJsonb(ctx *armadacontext.Context, filters []*model.Filter, activeJobSets bool, order *model.Order, skip int, take int) (*GetJobsResult, error) { - query, err := NewQueryBuilder(r.lookoutTables).GetJobsJsonb(filters, activeJobSets, order, skip, take) + query, err := NewQueryBuilder(r.lookoutTables).GetJobs(filters, activeJobSets, order, skip, take) if err != nil { return nil, err } @@ -154,7 +71,6 @@ func (r *SqlGetJobsRepository) getJobsJsonb(ctx *armadacontext.Context, filters var jobs []*model.Job if err := pgx.BeginTxFunc(ctx, r.db, pgx.TxOptions{ IsoLevel: pgx.RepeatableRead, - AccessMode: pgx.ReadWrite, DeferrableMode: pgx.Deferrable, }, func(tx pgx.Tx) error { rows, err := tx.Query(ctx, query.Sql, query.Args...) @@ -206,7 +122,7 @@ func (r *SqlGetJobsRepository) getJobsJsonb(ctx *armadacontext.Context, filters job.Node = lastRun.Node job.Cluster = lastRun.Cluster job.ExitCode = lastRun.ExitCode - + job.RuntimeSeconds = calculateJobRuntime(lastRun.Started, lastRun.Finished, r.clock) } jobs = append(jobs, job) } @@ -217,58 +133,22 @@ func (r *SqlGetJobsRepository) getJobsJsonb(ctx *armadacontext.Context, filters return &GetJobsResult{Jobs: jobs}, nil } -func rowsToJobs(jobRows []*jobRow, runRows []*runRow, annotationRows []*annotationRow) ([]*model.Job, error) { - jobMap := make(map[string]*model.Job) // Map from Job ID to Job - orderedJobIds := make([]string, len(jobRows)) - - for i, row := range jobRows { - job := jobRowToModel(row) - jobMap[row.jobId] = job - orderedJobIds[i] = row.jobId +func calculateJobRuntime(started, finished *model.PostgreSQLTime, clock clock.Clock) int32 { + if started == nil { + return 0 } - for _, row := range runRows { - run := &model.Run{ - Cluster: row.cluster, - ExitCode: database.ParseNullInt32(row.exitCode), - Finished: model.NewPostgreSQLTime(database.ParseNullTime(row.finished)), - JobRunState: row.jobRunState, - Node: database.ParseNullString(row.node), - Leased: model.NewPostgreSQLTime(database.ParseNullTime(row.leased)), - Pending: model.NewPostgreSQLTime(database.ParseNullTime(row.pending)), - RunId: row.runId, - Started: model.NewPostgreSQLTime(database.ParseNullTime(row.started)), - } - job, ok := jobMap[row.jobId] - if !ok { - return nil, errors.Errorf("job row with id %s not found", row.jobId) - } - job.Runs = append(jobMap[row.jobId].Runs, run) + if finished == nil { + now := clock.Now() + return formatDuration(started.Time, now) } - for _, row := range annotationRows { - job, ok := jobMap[row.jobId] - if !ok { - return nil, errors.Errorf("job row with id %s not found", row.jobId) - } - job.Annotations[row.annotationKey] = row.annotationValue - } - - jobs := make([]*model.Job, len(orderedJobIds)) - for i, jobId := range orderedJobIds { - job := jobMap[jobId] - sortRuns(job.Runs) - if len(job.Runs) > 0 { - lastRun := 
job.Runs[len(job.Runs)-1] // Get the last run - job.Node = lastRun.Node - job.Cluster = lastRun.Cluster - job.ExitCode = lastRun.ExitCode - - } - jobs[i] = job - } + return formatDuration(started.Time, finished.Time) +} - return jobs, nil +func formatDuration(start, end time.Time) int32 { + duration := end.Sub(start).Round(time.Second) + return int32(duration.Seconds()) } func jobRowToModel(row *jobRow) *model.Job { @@ -295,166 +175,3 @@ func jobRowToModel(row *jobRow) *model.Job { CancelReason: database.ParseNullString(row.cancelReason), } } - -func sortRuns(runs []*model.Run) { - sort.Slice(runs, func(i, j int) bool { - timeA, err := getJobRunTime(runs[i]) - if err != nil { - log.WithError(err).Error("failed to get time for run") - return true - } - timeB, err := getJobRunTime(runs[j]) - if err != nil { - log.WithError(err).Error("failed to get time for run") - return true - } - return timeA.Before(timeB) - }) -} - -func getJobRunTime(run *model.Run) (time.Time, error) { - if run.Leased != nil { - return run.Leased.Time, nil - } - if run.Pending != nil { - return run.Pending.Time, nil - } - return time.Time{}, errors.Errorf("error when getting run time for run with id %s", run.RunId) -} - -func makeJobRows(ctx *armadacontext.Context, tx pgx.Tx, tmpTableName string) ([]*jobRow, error) { - query := fmt.Sprintf(` - SELECT - j.job_id, - j.queue, - j.owner, - j.namespace, - j.jobset, - j.cpu, - j.memory, - j.ephemeral_storage, - j.gpu, - j.priority, - j.submitted, - j.cancelled, - j.state, - j.last_transition_time, - j.duplicate, - j.priority_class, - j.latest_run_id, - j.cancel_reason - FROM %s AS t - INNER JOIN job AS j ON t.job_id = j.job_id - `, tmpTableName) - pgxRows, err := tx.Query(ctx, query) - if err != nil { - return nil, err - } - defer pgxRows.Close() - - var rows []*jobRow - for pgxRows.Next() { - var row jobRow - err := pgxRows.Scan( - &row.jobId, - &row.queue, - &row.owner, - &row.namespace, - &row.jobSet, - &row.cpu, - &row.memory, - &row.ephemeralStorage, - &row.gpu, - &row.priority, - &row.submitted, - &row.cancelled, - &row.state, - &row.lastTransitionTime, - &row.duplicate, - &row.priorityClass, - &row.latestRunId, - &row.cancelReason, - ) - if err != nil { - log.WithError(err).Errorf("failed to scan job row at index %d", len(rows)) - } - rows = append(rows, &row) - } - return rows, nil -} - -func makeRunRows(ctx *armadacontext.Context, tx pgx.Tx, tmpTableName string) ([]*runRow, error) { - query := fmt.Sprintf(` - SELECT - jr.job_id, - jr.run_id, - jr.cluster, - jr.node, - jr.leased, - jr.pending, - jr.started, - jr.finished, - jr.job_run_state, - jr.exit_code - FROM %s AS t - INNER JOIN job_run AS jr ON t.job_id = jr.job_id - `, tmpTableName) - pgxRows, err := tx.Query(ctx, query) - if err != nil { - return nil, err - } - defer pgxRows.Close() - - var rows []*runRow - for pgxRows.Next() { - var row runRow - err := pgxRows.Scan( - &row.jobId, - &row.runId, - &row.cluster, - &row.node, - &row.leased, - &row.pending, - &row.started, - &row.finished, - &row.jobRunState, - &row.exitCode, - ) - if err != nil { - log.WithError(err).Errorf("failed to scan run row at index %d", len(rows)) - } - rows = append(rows, &row) - } - return rows, nil -} - -func makeAnnotationRows(ctx *armadacontext.Context, tx pgx.Tx, tempTableName string) ([]*annotationRow, error) { - query := fmt.Sprintf(` - SELECT - ual.job_id, - ual.key, - ual.value - FROM %s AS t - INNER JOIN user_annotation_lookup AS ual ON t.job_id = ual.job_id - `, tempTableName) - pgxRows, err := tx.Query(ctx, query) - if err 
!= nil { - return nil, err - } - defer pgxRows.Close() - - var rows []*annotationRow - for pgxRows.Next() { - var row annotationRow - err := pgxRows.Scan( - &row.jobId, - &row.annotationKey, - &row.annotationValue, - ) - if err != nil { - log.WithError(err).Errorf("failed to scan annotation row at index %d", len(rows)) - } - rows = append(rows, &row) - } - return rows, nil -} diff --git a/internal/lookoutv2/repository/getjobs_test.go b/internal/lookoutv2/repository/getjobs_test.go index 414e2468089..620340850a0 100644 --- a/internal/lookoutv2/repository/getjobs_test.go +++ b/internal/lookoutv2/repository/getjobs_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + clock "k8s.io/utils/clock/testing" + "github.com/google/uuid" "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" @@ -52,23 +54,20 @@ var ( } ) -func withGetJobsSetup(f func(*instructions.InstructionConverter, *lookoutdb.LookoutDb, *SqlGetJobsRepository) error) error { - for _, useJsonbBackend := range []bool{false, true} { - if err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) - store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) - repo := NewSqlGetJobsRepository(db, useJsonbBackend) - return f(converter, store, repo) - }); err != nil { - return err - } - } - return nil +func withGetJobsSetup(f func(*instructions.InstructionConverter, *lookoutdb.LookoutDb, *SqlGetJobsRepository, *clock.FakeClock) error) error { + testClock := clock.NewFakeClock(time.Now()) + return lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) + store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) + repo := NewSqlGetJobsRepository(db) + repo.clock = testClock + return f(converter, store, repo, testClock) + }) } func TestGetJobsSingle(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ JobId: jobId, Priority: priority, @@ -82,6 +81,7 @@ func TestGetJobsSingle(t *testing.T) { "hello": "world", }, }). + Lease(runId, cluster, node, baseTime). Pending(runId, cluster, baseTime). Running(runId, node, baseTime). RunSucceeded(runId, baseTime). @@ -99,11 +99,17 @@ func TestGetJobsSingle(t *testing.T) { } func TestGetJobsMultipleRuns(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + firstRunId := uuid.NewString() + secondRunId := uuid.NewString() + + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). - Pending(uuid.NewString(), cluster, baseTime). - Pending(uuid.NewString(), cluster, baseTime.Add(time.Second)). + Lease(firstRunId, cluster, node, baseTime). 
+ Pending(firstRunId, cluster, baseTime). + Lease(secondRunId, cluster, node, baseTime.Add(time.Second)). + Pending(secondRunId, cluster, baseTime.Add(time.Second)). + Lease(runId, cluster, node, baseTime.Add(2*time.Second)). Pending(runId, cluster, baseTime.Add(2*time.Second)). Running(runId, node, baseTime.Add(2*time.Second)). RunSucceeded(runId, baseTime.Add(2*time.Second)). @@ -122,7 +128,7 @@ func TestGetJobsMultipleRuns(t *testing.T) { } func TestOrderByUnsupportedField(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { _, err := repo.GetJobs( armadacontext.TODO(), []*model.Filter{}, @@ -142,7 +148,7 @@ func TestOrderByUnsupportedField(t *testing.T) { } func TestOrderByUnsupportedDirection(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { _, err := repo.GetJobs( armadacontext.TODO(), []*model.Filter{}, @@ -163,26 +169,26 @@ func TestOrderByUnsupportedDirection(t *testing.T) { // Since job ids are ULIDs, it is comparable to sorting by submission time func TestGetJobsOrderByJobId(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { firstId := "01f3j0g1md4qx7z5qb148qnh4d" secondId := "01f3j0g1md4qx7z5qb148qnjjj" thirdId := "01f3j0g1md4qx7z5qb148qnmmm" - third := NewJobSimulator(converter, store). + third := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ JobId: thirdId, }). Build(). Job() - second := NewJobSimulator(converter, store). + second := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ JobId: secondId, }). Build(). Job() - first := NewJobSimulator(converter, store). + first := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ JobId: firstId, }). @@ -233,18 +239,18 @@ func TestGetJobsOrderByJobId(t *testing.T) { } func TestGetJobsOrderBySubmissionTime(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - third := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + third := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime.Add(3*time.Second), basicJobOpts). Build(). Job() - second := NewJobSimulator(converter, store). + second := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime.Add(2*time.Second), basicJobOpts). Build(). Job() - first := NewJobSimulator(converter, store). 
+ first := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() @@ -293,22 +299,25 @@ func TestGetJobsOrderBySubmissionTime(t *testing.T) { } func TestGetJobsOrderByLastTransitionTime(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { runId1 := uuid.NewString() - third := NewJobSimulator(converter, store). + third := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Lease(runId1, cluster, node, baseTime). Pending(runId1, cluster, baseTime). - Running(runId1, cluster, baseTime.Add(3*time.Minute)). + Running(runId1, node, baseTime.Add(3*time.Minute)). Build(). Job() - second := NewJobSimulator(converter, store). + runId2 := uuid.NewString() + second := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). - Pending(uuid.NewString(), cluster, baseTime.Add(2*time.Minute)). + Lease(runId2, cluster, node, baseTime.Add(2*time.Minute)). + Pending(runId2, cluster, baseTime.Add(2*time.Minute)). Build(). Job() - first := NewJobSimulator(converter, store). + first := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() @@ -357,7 +366,7 @@ func TestGetJobsOrderByLastTransitionTime(t *testing.T) { } func TestFilterByUnsupportedField(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { _, err := repo.GetJobs( armadacontext.TODO(), []*model.Filter{{ @@ -378,7 +387,7 @@ func TestFilterByUnsupportedField(t *testing.T) { } func TestFilterByUnsupportedMatch(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { _, err := repo.GetJobs( armadacontext.TODO(), []*model.Filter{{ @@ -400,18 +409,18 @@ func TestFilterByUnsupportedMatch(t *testing.T) { } func TestGetJobsById(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{JobId: jobId}). Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{JobId: "01f3j0g1md4qx7z5qb148qnaaa"}). Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). 
Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{JobId: "01f3j0g1md4qx7z5qb148qnbbb"}). Build(). Job() @@ -440,28 +449,28 @@ func TestGetJobsById(t *testing.T) { } func TestGetJobsByQueue(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit("queue-2", jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit("queue-3", jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit("other-queue", jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit("something-else", jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() @@ -537,28 +546,28 @@ func TestGetJobsByQueue(t *testing.T) { } func TestGetJobsByJobSet(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, "job\\set\\1", owner, namespace, baseTime, basicJobOpts). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, "job\\set\\2", owner, namespace, baseTime, basicJobOpts). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, "job\\set\\3", owner, namespace, baseTime, basicJobOpts). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, "other-job\\set", owner, namespace, baseTime, basicJobOpts). Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, "something-else", owner, namespace, baseTime, basicJobOpts). Build(). Job() @@ -634,28 +643,28 @@ func TestGetJobsByJobSet(t *testing.T) { } func TestGetJobsByOwner(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() - job2 := NewJobSimulator(converter, store). 
+ job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, "user-2", namespace, baseTime, basicJobOpts). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, "user-3", namespace, baseTime, basicJobOpts). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, "other-user", namespace, baseTime, basicJobOpts). Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, "something-else", namespace, baseTime, basicJobOpts). Build(). Job() @@ -731,29 +740,33 @@ func TestGetJobsByOwner(t *testing.T) { } func TestGetJobsByState(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - queued := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + queued := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). Build(). Job() - pending := NewJobSimulator(converter, store). + runId1 := uuid.NewString() + pending := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). - Pending(uuid.NewString(), cluster, baseTime). + Lease(runId1, cluster, node, baseTime). + Pending(runId1, cluster, baseTime). Build(). Job() runId2 := uuid.NewString() - running := NewJobSimulator(converter, store). + running := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Lease(runId2, cluster, node, baseTime). Pending(runId2, cluster, baseTime). Running(runId2, node, baseTime). Build(). Job() runId3 := uuid.NewString() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Lease(runId3, cluster, node, baseTime). Pending(runId3, cluster, baseTime). Running(runId3, node, baseTime). Succeeded(baseTime). @@ -811,8 +824,8 @@ func TestGetJobsByState(t *testing.T) { } func TestGetJobsByAnnotation(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job1 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Annotations: map[string]string{ "annotation-key-1": "annotation-value-1", @@ -822,7 +835,7 @@ func TestGetJobsByAnnotation(t *testing.T) { Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Annotations: map[string]string{ "annotation-key-1": "annotation-value-2", @@ -831,7 +844,7 @@ func TestGetJobsByAnnotation(t *testing.T) { Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). 
Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Annotations: map[string]string{ "annotation-key-1": "annotation-value-3", @@ -840,7 +853,7 @@ func TestGetJobsByAnnotation(t *testing.T) { Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Annotations: map[string]string{ "annotation-key-2": "annotation-value-1", @@ -849,7 +862,7 @@ func TestGetJobsByAnnotation(t *testing.T) { Build(). Job() - job5 := NewJobSimulator(converter, store). + job5 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Annotations: map[string]string{ "annotation-key-1": "annotation-value-6", @@ -990,29 +1003,29 @@ func TestGetJobsByAnnotation(t *testing.T) { } func TestGetJobsByCpu(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job1 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Cpu: resource.MustParse("1"), }). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Cpu: resource.MustParse("3"), }). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Cpu: resource.MustParse("5"), }). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Cpu: resource.MustParse("10"), }). @@ -1133,29 +1146,29 @@ func TestGetJobsByCpu(t *testing.T) { } func TestGetJobsByMemory(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job1 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Memory: resource.MustParse("1000"), }). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Memory: resource.MustParse("3000"), }). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Memory: resource.MustParse("5000"), }). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Memory: resource.MustParse("10000"), }). 
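These test updates rely on the `clock.Clock` now injected into `SqlGetJobsRepository`: `withGetJobsSetup` hands every test a `FakeClock`, so the runtime reported for a still-running job (see the `TestJobRuntime*` tests added at the end of this file) is deterministic. A standalone sketch of the idea, where `runtimeSeconds` is an illustrative stand-in for the started-but-not-finished branch of `calculateJobRuntime`, not the repository's actual function:

```go
package main

import (
	"fmt"
	"time"

	clocktesting "k8s.io/utils/clock/testing"
)

// runtimeSeconds mirrors the "started but not finished" case:
// runtime = now - started, rounded to whole seconds.
func runtimeSeconds(started time.Time, clk *clocktesting.FakeClock) int32 {
	return int32(clk.Now().Sub(started).Round(time.Second).Seconds())
}

func main() {
	start := time.Now()
	clk := clocktesting.NewFakeClock(start)
	clk.SetTime(start.Add(5 * time.Minute)) // advance "now" without sleeping
	fmt.Println(runtimeSeconds(start, clk)) // prints 300
}
```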
@@ -1276,29 +1289,29 @@ func TestGetJobsByMemory(t *testing.T) { } func TestGetJobsByEphemeralStorage(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job1 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ EphemeralStorage: resource.MustParse("1000"), }). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ EphemeralStorage: resource.MustParse("3000"), }). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ EphemeralStorage: resource.MustParse("5000"), }). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ EphemeralStorage: resource.MustParse("10000"), }). @@ -1419,29 +1432,29 @@ func TestGetJobsByEphemeralStorage(t *testing.T) { } func TestGetJobsByGpu(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job1 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Gpu: resource.MustParse("1"), }). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Gpu: resource.MustParse("3"), }). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Gpu: resource.MustParse("5"), }). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Gpu: resource.MustParse("8"), }). @@ -1562,29 +1575,29 @@ func TestGetJobsByGpu(t *testing.T) { } func TestGetJobsByPriority(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job1 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Priority: 10, }). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Priority: 20, }). Build(). Job() - job3 := NewJobSimulator(converter, store). 
+ job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Priority: 30, }). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ Priority: 40, }). @@ -1705,36 +1718,36 @@ func TestGetJobsByPriority(t *testing.T) { } func TestGetJobsByPriorityClass(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - job := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + job := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ PriorityClass: "priority-class-1", }). Build(). Job() - job2 := NewJobSimulator(converter, store). + job2 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ PriorityClass: "priority-class-2", }). Build(). Job() - job3 := NewJobSimulator(converter, store). + job3 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ PriorityClass: "priority-class-3", }). Build(). Job() - job4 := NewJobSimulator(converter, store). + job4 := NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ PriorityClass: "other-priority-class", }). Build(). Job() - _ = NewJobSimulator(converter, store). + _ = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ PriorityClass: "something-else", }). @@ -1812,12 +1825,12 @@ func TestGetJobsByPriorityClass(t *testing.T) { } func TestGetJobsSkip(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { nJobs := 15 jobs := make([]*model.Job, nJobs) for i := 0; i < nJobs; i++ { jobId := util.NewULID() - jobs[i] = NewJobSimulator(converter, store). + jobs[i] = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{JobId: jobId}). Build(). Job() @@ -1886,12 +1899,12 @@ func TestGetJobsSkip(t *testing.T) { } func TestGetJobsComplex(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { nJobs := 15 jobs := make([]*model.Job, nJobs) for i := 0; i < nJobs; i++ { jobId := util.NewULID() - jobs[i] = NewJobSimulator(converter, store). + jobs[i] = NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ JobId: jobId, Annotations: map[string]string{ @@ -1904,7 +1917,7 @@ func TestGetJobsComplex(t *testing.T) { } for i := 0; i < nJobs; i++ { - NewJobSimulator(converter, store). + NewJobSimulatorWithClock(converter, store, testClock). 
Submit("other-queue", jobSet, owner, namespace, baseTime, &JobOptions{ JobId: util.NewULID(), Annotations: map[string]string{ @@ -1957,19 +1970,19 @@ func TestGetJobsComplex(t *testing.T) { } func TestGetJobsActiveJobSet(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { - activeJobSet1 := NewJobSimulator(converter, store). + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + activeJobSet1 := NewJobSimulatorWithClock(converter, store, testClock). Submit("queue-1", "job-set-1", owner, namespace, baseTime, &JobOptions{}). Build(). Job() - inactiveJobSet1 := NewJobSimulator(converter, store). + inactiveJobSet1 := NewJobSimulatorWithClock(converter, store, testClock). Submit("queue-1", "job-set-1", owner, namespace, baseTime, &JobOptions{}). Cancelled(baseTime.Add(1 * time.Minute)). Build(). Job() - NewJobSimulator(converter, store). + NewJobSimulatorWithClock(converter, store, testClock). Submit("queue-2", "job-set-2", owner, namespace, baseTime, &JobOptions{}). Cancelled(baseTime.Add(1 * time.Minute)). Build(). @@ -1999,13 +2012,16 @@ func TestGetJobsActiveJobSet(t *testing.T) { } func TestGetJobsWithLatestRunDetails(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { runIdLatest := uuid.NewString() // Simulate job submission and multiple runs, with the latest run being successful - NewJobSimulator(converter, store). + firstRunId := uuid.NewString() + NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). - Pending(uuid.NewString(), "first-cluster", baseTime). - Running(uuid.NewString(), "first-node", baseTime.Add(time.Minute)). + Lease(firstRunId, "first-cluster", "first-node", baseTime). + Pending(firstRunId, "first-cluster", baseTime). + Running(firstRunId, "first-node", baseTime.Add(time.Minute)). + Lease(runIdLatest, "latest-cluster", "latest-node", baseTime.Add(2*time.Minute)). Pending(runIdLatest, "latest-cluster", baseTime.Add(2*time.Minute)). Running(runIdLatest, "latest-node", baseTime.Add(3*time.Minute)). RunSucceeded(runIdLatest, baseTime.Add(4*time.Minute)). @@ -2033,14 +2049,15 @@ func TestGetJobsWithLatestRunDetails(t *testing.T) { } func TestGetJobsWithSpecificRunDetails(t *testing.T) { - err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { runIdSpecific := uuid.NewString() // Simulate job submission and a specific failed run - NewJobSimulator(converter, store). + NewJobSimulatorWithClock(converter, store, testClock). Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Lease(runIdSpecific, "specific-cluster", "specific-node", baseTime). Pending(runIdSpecific, "specific-cluster", baseTime). Running(runIdSpecific, "specific-node", baseTime.Add(time.Minute)). - RunFailed(runIdSpecific, "specific-node", 2, "Specific failure message", baseTime.Add(2*time.Minute)). 
+ RunFailed(runIdSpecific, "specific-node", 2, "Specific failure message", "", baseTime.Add(2*time.Minute)). Build(). Job() @@ -2063,3 +2080,89 @@ func TestGetJobsWithSpecificRunDetails(t *testing.T) { }) require.NoError(t, err) } + +func TestJobRuntimeWhenNoStartOrEnd(t *testing.T) { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + runId := uuid.NewString() + + NewJobSimulatorWithClock(converter, store, testClock). + Submit(queue, jobSet, owner, namespace, time.Now(), basicJobOpts). + Lease(runId, "cluster", "node", time.Now()). + Build(). + Job() + + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 10) + require.NoError(t, err) + require.Len(t, result.Jobs, 1) + + actualRuntime := result.Jobs[0].RuntimeSeconds + expectedRuntime := int32(0) // Runtime should be 0 when job is just leased + assert.Equal(t, expectedRuntime, actualRuntime) + + return nil + }) + require.NoError(t, err) +} + +func TestJobRuntimeWhenStartedButNotFinishedWithClock(t *testing.T) { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + runId := uuid.NewString() + startTime := testClock.Now().UTC() + runningTime := startTime.Add(time.Minute) + + NewJobSimulatorWithClock(converter, store, testClock). + Submit(queue, jobSet, owner, namespace, startTime, basicJobOpts). + Lease(runId, "cluster", "node", startTime). + Pending(runId, "cluster", startTime). + Running(runId, "node", runningTime). + Build(). + Job() + + // Increment time by 5 mins + testClock.SetTime(testClock.Now().Add(time.Minute * 5)) + + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 10) + require.NoError(t, err) + require.Len(t, result.Jobs, 1) + + actualRuntime := result.Jobs[0].RuntimeSeconds + expectedRuntime := int32(240) // We incremented time by 5 mins, but the run started 1 min after start time + assert.Equal(t, expectedRuntime, actualRuntime) + + return nil + }) + require.NoError(t, err) +} + +func TestJobRuntimeWhenRunFinishedWithClock(t *testing.T) { + clk := clock.NewFakeClock(time.Now()) + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository, testClock *clock.FakeClock) error { + runId := uuid.NewString() + startTime := testClock.Now() + endTime := startTime.Add(5 * time.Minute) + runningTime := startTime.Add(time.Minute) + + NewJobSimulatorWithClock(converter, store, clk). + Submit(queue, jobSet, owner, namespace, startTime, basicJobOpts). + Lease(runId, "specific-cluster", "specific-node", startTime). + Pending(runId, "cluster", startTime). + Running(runId, "node", runningTime). + RunFailed(runId, "node", 1, "failed", "debug", endTime). + Build(). 
+ Job() + + // Increment time by 10 mins + testClock.SetTime(testClock.Now().Add(time.Minute * 10)) + + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 10) + require.NoError(t, err) + require.Len(t, result.Jobs, 1) + + actualRuntime := result.Jobs[0].RuntimeSeconds + expectedRuntime := int32(endTime.Sub(runningTime).Seconds()) + assert.Equal(t, expectedRuntime, actualRuntime) + + return nil + }) + require.NoError(t, err) +} diff --git a/internal/lookoutv2/repository/getjobspec_test.go b/internal/lookoutv2/repository/getjobspec_test.go index 2b49eea53bb..81728bb95db 100644 --- a/internal/lookoutv2/repository/getjobspec_test.go +++ b/internal/lookoutv2/repository/getjobspec_test.go @@ -17,7 +17,7 @@ import ( func TestGetJobSpec(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, true) + converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) job := NewJobSimulator(converter, store). diff --git a/internal/lookoutv2/repository/groupjobs.go b/internal/lookoutv2/repository/groupjobs.go index 7ee8f53c098..6cd8d945994 100644 --- a/internal/lookoutv2/repository/groupjobs.go +++ b/internal/lookoutv2/repository/groupjobs.go @@ -9,7 +9,7 @@ import ( "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/lookoutv2/model" ) @@ -30,18 +30,16 @@ type GroupJobsRepository interface { } type SqlGroupJobsRepository struct { - db *pgxpool.Pool - lookoutTables *LookoutTables - useJsonbBackend bool + db *pgxpool.Pool + lookoutTables *LookoutTables } const stateAggregatePrefix = "state_" -func NewSqlGroupJobsRepository(db *pgxpool.Pool, useJsonbBackend bool) *SqlGroupJobsRepository { +func NewSqlGroupJobsRepository(db *pgxpool.Pool) *SqlGroupJobsRepository { return &SqlGroupJobsRepository{ - db: db, - lookoutTables: NewTables(), - useJsonbBackend: useJsonbBackend, + db: db, + lookoutTables: NewTables(), } } @@ -55,12 +53,7 @@ func (r *SqlGroupJobsRepository) GroupBy( skip int, take int, ) (*GroupByResult, error) { - qb := NewQueryBuilder(r.lookoutTables) - groupBy := qb.GroupBy - if r.useJsonbBackend { - groupBy = qb.GroupByJsonb - } - query, err := groupBy(filters, activeJobSets, order, groupedField, aggregates, skip, take) + query, err := NewQueryBuilder(r.lookoutTables).GroupBy(filters, activeJobSets, order, groupedField, aggregates, skip, take) if err != nil { return nil, err } @@ -115,7 +108,7 @@ func scanGroup(rows pgx.Rows, field string, aggregates []string, filters []*mode for i, parser := range aggregateParsers { aggregateRefs[i] = parser.GetVariableRef() } - varAddresses := util.Concat([]interface{}{groupParser.GetVariableRef(), &count}, aggregateRefs) + varAddresses := slices.Concatenate([]interface{}{groupParser.GetVariableRef(), &count}, aggregateRefs) err := rows.Scan(varAddresses...) 
if err != nil { return nil, err diff --git a/internal/lookoutv2/repository/groupjobs_test.go b/internal/lookoutv2/repository/groupjobs_test.go index f720c33e530..5b2d7019a47 100644 --- a/internal/lookoutv2/repository/groupjobs_test.go +++ b/internal/lookoutv2/repository/groupjobs_test.go @@ -21,17 +21,12 @@ import ( ) func withGroupJobsSetup(f func(*instructions.InstructionConverter, *lookoutdb.LookoutDb, *SqlGroupJobsRepository) error) error { - for _, useJsonbBackend := range []bool{false, true} { - if err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { - converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}, false) - store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) - repo := NewSqlGroupJobsRepository(db, useJsonbBackend) - return f(converter, store, repo) - }); err != nil { - return err - } - } - return nil + return lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) + store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) + repo := NewSqlGroupJobsRepository(db) + return f(converter, store, repo) + }) } func TestGroupByQueue(t *testing.T) { @@ -1482,7 +1477,7 @@ func makeLeased(opts *createJobsOpts, converter *instructions.InstructionConvert Submit(opts.queue, opts.jobSet, owner, namespace, tSubmit, &JobOptions{ Annotations: opts.annotations, }). - Lease(uuid.NewString(), lastTransitionTime). + Lease(uuid.NewString(), cluster, node, lastTransitionTime). Build() } @@ -1559,7 +1554,7 @@ func makeFailed(opts *createJobsOpts, converter *instructions.InstructionConvert }). Pending(runId, cluster, lastTransitionTime.Add(-2*time.Minute)). Running(runId, cluster, lastTransitionTime.Add(-1*time.Minute)). - RunFailed(runId, node, 1, "error", lastTransitionTime). + RunFailed(runId, node, 1, "error", "debug", lastTransitionTime). Failed(node, 1, "error", lastTransitionTime). Build() } diff --git a/internal/lookoutv2/repository/querybuilder.go b/internal/lookoutv2/repository/querybuilder.go index a58f43a9b94..01255998086 100644 --- a/internal/lookoutv2/repository/querybuilder.go +++ b/internal/lookoutv2/repository/querybuilder.go @@ -2,23 +2,19 @@ package repository import ( "fmt" - "math" "strings" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "k8s.io/utils/strings/slices" + "golang.org/x/exp/slices" - "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/database/lookout" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/lookoutv2/model" ) const ( - countCol = "count" - annotationGroupTableAbbrev = "ual_group" - activeJobSetsTableAbbrev = "active_job_sets" + countCol = "count" + activeJobSetsTableAbbrev = "active_job_sets" ) var ( @@ -57,104 +53,13 @@ type queryColumn struct { abbrev string } -// Represents data required to construct a condition based on a column, it's desired value, and match expression -type queryFilter struct { - column *queryColumn - value interface{} - match string -} - -// Represents data required to construct a sort expression based on a column and a direction. 
-// The direction can be "ASC" or "DESC" -type queryOrder struct { - column *queryColumn - direction string -} - func NewQueryBuilder(lookoutTables *LookoutTables) *QueryBuilder { return &QueryBuilder{ lookoutTables: lookoutTables, } } -// CreateTempTable creates a temporary table of job ids -// Returns the Query and the name of the temporary table, to be used later in InsertIntoTempTable -func (qb *QueryBuilder) CreateTempTable() (*Query, string) { - tempTable := database.UniqueTableName(jobTable) - sql := fmt.Sprintf(` - CREATE TEMPORARY TABLE %s ( - job_id varchar(32) NOT NULL - ) ON COMMIT DROP`, tempTable) - return &Query{ - Sql: sql, - Args: []interface{}{}, - }, tempTable -} - -// InsertIntoTempTable returns Query that returns Job IDs according to filters, order, skip and take, and inserts them -// in the temp table with name tempTableName -func (qb *QueryBuilder) InsertIntoTempTable(tempTableName string, filters []*model.Filter, activeJobSets bool, order *model.Order, skip, take int) (*Query, error) { - err := qb.validateFilters(filters) - if err != nil { - return nil, errors.Wrap(err, "filters are invalid") - } - err = qb.validateOrder(order) - if err != nil { - return nil, errors.Wrap(err, "order is invalid") - } - normalFilters, annotationFilters := splitFilters(filters) - - fields := util.Map(normalFilters, func(filter *model.Filter) string { return filter.Field }) - if !orderIsNull(order) { - fields = append(fields, order.Field) - } - allCols, err := qb.fieldsToCols(fields) - if err != nil { - return nil, err - } - tablesFromColumns, err := qb.tablesForCols(allCols) - if err != nil { - return nil, err - } - queryTables, err := qb.determineTablesForQuery(tablesFromColumns) - if err != nil { - return nil, err - } - queryFilters, err := qb.makeQueryFilters(normalFilters, queryTables) - if err != nil { - return nil, err - } - queryOrd, err := qb.makeQueryOrder(order, queryTables) - if err != nil { - return nil, err - } - fromBuilder, err := qb.makeFromSql(queryTables, normalFilters, annotationFilters, activeJobSets) - if err != nil { - return nil, err - } - whereSql, err := qb.queryFiltersToSql(queryFilters, true) - if err != nil { - return nil, err - } - orderSql := qb.queryOrderToSql(queryOrd) - abbrev, err := qb.firstTableAbbrev(queryTables) - if err != nil { - return nil, err - } - sql := fmt.Sprintf(` - INSERT INTO %s (job_id) - SELECT %s.job_id - %s - %s - %s - %s - ON CONFLICT DO NOTHING`, - tempTableName, abbrev, fromBuilder.Build(), whereSql, orderSql, limitOffsetSql(skip, take), - ) - return &Query{Sql: sql, Args: qb.args}, nil -} - -func (qb *QueryBuilder) GetJobsJsonb( +func (qb *QueryBuilder) GetJobs( filters []*model.Filter, activeJobSets bool, order *model.Order, @@ -173,12 +78,12 @@ func (qb *QueryBuilder) GetJobsJsonb( activeJobSetsFilter = joinWithActiveJobSetsTable } - where, err := qb.makeWhereJsonb(filters) + where, err := qb.makeWhere(filters) if err != nil { return nil, err } - orderBy, err := qb.makeOrderByJsonb(order) + orderBy, err := qb.makeOrderBy(order) if err != nil { return nil, err } @@ -274,117 +179,6 @@ func (qb *QueryBuilder) GroupBy( return nil, errors.Wrap(err, "group field is invalid") } - normalFilters, annotationFilters := splitFilters(filters) - fields := util.Concat( - util.Map(normalFilters, func(filter *model.Filter) string { return filter.Field }), - aggregates, - ) - if !orderIsNull(order) && order.Field != countCol { // count does not correspond to a column in any table - fields = append(fields, order.Field) - } - allCols, err := 
qb.fieldsToCols(fields) - if err != nil { - return nil, err - } - tablesFromColumns, err := qb.tablesForCols(allCols) - if err != nil { - return nil, err - } - queryTables, err := qb.determineTablesForQuery(tablesFromColumns) - if err != nil { - return nil, err - } - queryFilters, err := qb.makeQueryFilters(normalFilters, queryTables) - if err != nil { - return nil, err - } - - fromBuilder, err := qb.makeFromSql(queryTables, normalFilters, annotationFilters, activeJobSets) - if err != nil { - return nil, err - } - var groupCol *queryColumn - if groupedField.IsAnnotation { - groupCol = &queryColumn{ - name: annotationValueCol, - table: userAnnotationLookupTable, - abbrev: annotationGroupTableAbbrev, - } - annotationGroupTable, err := qb.annotationGroupTable(groupedField.Field, normalFilters) - if err != nil { - return nil, err - } - fromBuilder.Join(Inner, fmt.Sprintf("( %s )", annotationGroupTable), annotationGroupTableAbbrev, []string{jobIdCol}) - } else { - groupCol, err = qb.getGroupByQueryCol(groupedField.Field, queryTables) - if err != nil { - return nil, err - } - } - - whereSql, err := qb.queryFiltersToSql(queryFilters, true) - if err != nil { - return nil, err - } - queryAggregators, err := qb.getQueryAggregators(aggregates, normalFilters, queryTables) - if err != nil { - return nil, err - } - selectListSql, err := qb.getAggregatesSql(queryAggregators) - if err != nil { - return nil, err - } - orderSql, err := qb.groupByOrderSql(order) - if err != nil { - return nil, err - } - groupBySql, err := qb.createGroupBySQL(order, groupCol, aggregates) - if err != nil { - return nil, err - } - sql := fmt.Sprintf(` - SELECT %[1]s.%[2]s, %[3]s - %[4]s - %[5]s - %[6]s - %[7]s - %[8]s`, - groupCol.abbrev, groupCol.name, selectListSql, - fromBuilder.Build(), - whereSql, - groupBySql, - orderSql, - limitOffsetSql(skip, take), - ) - return &Query{Sql: sql, Args: qb.args}, nil -} - -func (qb *QueryBuilder) GroupByJsonb( - filters []*model.Filter, - activeJobSets bool, - order *model.Order, - groupedField *model.GroupedField, - aggregates []string, - skip int, - take int, -) (*Query, error) { - err := qb.validateFilters(filters) - if err != nil { - return nil, errors.Wrap(err, "filters are invalid") - } - err = qb.validateGroupOrder(order) - if err != nil { - return nil, errors.Wrap(err, "group order is invalid") - } - err = qb.validateAggregates(aggregates) - if err != nil { - return nil, errors.Wrap(err, "aggregates are invalid") - } - err = qb.validateGroupedField(groupedField) - if err != nil { - return nil, errors.Wrap(err, "group field is invalid") - } - activeJobSetsFilter := "" if activeJobSets { activeJobSetsFilter = joinWithActiveJobSetsTable @@ -392,12 +186,12 @@ func (qb *QueryBuilder) GroupByJsonb( groupByColumn := queryColumn{table: jobTable, abbrev: jobTableAbbrev} if groupedField.IsAnnotation { - groupByColumn.name = qb.annotationColumnJsonb(groupedField.Field) + groupByColumn.name = qb.annotationColumn(groupedField.Field) } else { groupByColumn.name = groupedField.Field } - queryAggregators, err := qb.getQueryAggregators(aggregates, filters, map[string]bool{jobTable: true}) + queryAggregators, err := qb.getQueryAggregators(aggregates, filters) if err != nil { return nil, err } @@ -417,7 +211,7 @@ func (qb *QueryBuilder) GroupByJsonb( // the key in question, so we need to filter out such rows. 
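// Illustrative sketch (not part of the diff): how the consolidated
// QueryBuilder.GroupBy can be called now that the jsonb-backed implementation
// is the only code path. Grouping by an annotation reads the job table's
// annotations jsonb column, and the builder appends a MatchExists filter for
// the key so jobs without that annotation are excluded from the groups. The
// queue name and annotation key below are hypothetical, and the snippet
// assumes it lives inside the Armada module (the repository package is internal).
package main

import (
	"fmt"
	"log"

	"github.com/armadaproject/armada/internal/lookoutv2/model"
	"github.com/armadaproject/armada/internal/lookoutv2/repository"
)

func main() {
	qb := repository.NewQueryBuilder(repository.NewTables())

	filters := []*model.Filter{
		{Field: "queue", Match: model.MatchExact, Value: "example-queue"},
	}
	order := &model.Order{Field: "count", Direction: "DESC"}
	groupedField := &model.GroupedField{Field: "custom_annotation", IsAnnotation: true}

	// activeJobSets=false, aggregate submitted per group, skip 0, take 10.
	query, err := qb.GroupBy(filters, false, order, groupedField, []string{"submitted"}, 0, 10)
	if err != nil {
		log.Fatal(err)
	}

	// query.Sql groups on annotations->>$n and query.Args holds the bound values.
	fmt.Println(query.Sql)
	fmt.Println(query.Args...)
}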
filters = append(filters, &model.Filter{Field: groupedField.Field, Match: model.MatchExists, IsAnnotation: true}) } - where, err := qb.makeWhereJsonb(filters) + where, err := qb.makeWhere(filters) if err != nil { return nil, err } @@ -477,332 +271,6 @@ func (qb *QueryBuilder) createGroupBySQL(order *model.Order, groupCol *queryColu return expr + fmt.Sprintf(", %s.%s", groupCol.abbrev, col), nil } -func (qb *QueryBuilder) fieldsToCols(fields []string) ([]string, error) { - var cols []string - for _, field := range fields { - col, err := qb.lookoutTables.ColumnFromField(field) - if err != nil { - return nil, err - } - cols = append(cols, col) - } - return cols, nil -} - -// For each column, get all the possible tables we could be querying -// returns a list of string sets, one set of tables per column -func (qb *QueryBuilder) tablesForCols(cols []string) ([]map[string]bool, error) { - var result []map[string]bool - for _, col := range cols { - tables, err := qb.lookoutTables.TablesForColumn(col) - if err != nil { - return nil, err - } - result = append(result, tables) - } - return result, nil -} - -// For each query, we will have to query one or more columns. Each column can be found in one or more tables. -// To optimise queries, we want to find the smallest set of tables that includes all columns required in the query. -// determineTablesForQuery takes a list of sets of tables (one set of tables for each column), and returns the minimal -// set of tables that includes all columns. -// E.g. three tables: A, B, C -// -// Col 1 is in table [A, B] -// Col 2 is in table [B] -// Col 3 is in table [B] -// Col 4 is in table [C] -// Therefore, the smallest set of tables to use is [B, C] -// -// If multiple tables can be used, it picks the one with the highest precedence -func (qb *QueryBuilder) determineTablesForQuery(tablesForColumns []map[string]bool) (map[string]bool, error) { - if len(tablesForColumns) == 0 { - return util.StringListToSet([]string{jobTable}), nil - } - inter := intersection(tablesForColumns) - if len(inter) > 0 { - for _, table := range qb.lookoutTables.TablePrecedence() { - _, ok := inter[table] - if ok { - return util.StringListToSet([]string{table}), nil - } - } - } - - // Compute power set of tables, and select smallest set that includes all columns - nTables := len(qb.lookoutTables.TablePrecedence()) - nSets := int(math.Pow(2, float64(nTables))) - 1 - i := 1 - bestSet := map[string]bool{} - for i <= nSets { - mask := i - j := 0 - set := map[string]bool{} - for mask > 0 { - maybeOne := mask & 1 - if maybeOne == 1 { - set[qb.lookoutTables.TablePrecedence()[j]] = true - } - mask = mask >> 1 - j += 1 - } - - didMatch := true - for _, tablesForCol := range tablesForColumns { - didMatchCol := false - for table := range tablesForCol { - if _, ok := set[table]; ok { - didMatchCol = true - } - } - if !didMatchCol { - didMatch = false - break - } - } - - if didMatch && (len(bestSet) == 0 || len(bestSet) > len(set)) { - bestSet = set - } - i++ - } - - return bestSet, nil -} - -// Takes list of sets and returns their intersection -func intersection(sets []map[string]bool) map[string]bool { - if len(sets) == 0 { - return map[string]bool{} - } - inter := sets[0] - for i := 1; i < len(sets); i++ { - cur := make(map[string]bool) - for s := range inter { - if _, ok := sets[i][s]; ok { - cur[s] = true - } - } - inter = cur - } - return inter -} - -// Split filters into those for normal columns and those for annotations -func splitFilters(filters []*model.Filter) ([]*model.Filter, 
[]*model.Filter) { - var normalFilters []*model.Filter - var annotationFilters []*model.Filter - for _, filter := range filters { - if filter.IsAnnotation { - annotationFilters = append(annotationFilters, filter) - } else { - normalFilters = append(normalFilters, filter) - } - } - return normalFilters, annotationFilters -} - -// makeFromSql creates FROM clause using a set of tables, -// joining them on jobId if multiple tables are present -// If annotations filters are present, inner joins on a table to select matching job ids with all the annotations -func (qb *QueryBuilder) makeFromSql(queryTables map[string]bool, normalFilters []*model.Filter, annotationFilters []*model.Filter, activeJobSets bool) (*FromBuilder, error) { - sortedTables := make([]string, len(queryTables)) - idx := 0 - for _, table := range qb.lookoutTables.TablePrecedence() { - if _, ok := queryTables[table]; !ok { - continue - } - sortedTables[idx] = table - idx++ - } - firstAbbrev, err := qb.lookoutTables.TableAbbrev(sortedTables[0]) - if err != nil { - return nil, err - } - - fromBuilder := NewFromBuilder(sortedTables[0], firstAbbrev) - - for i := 1; i < len(sortedTables); i++ { - table := sortedTables[i] - abbrev, err := qb.lookoutTables.TableAbbrev(table) - if err != nil { - return nil, err - } - fromBuilder.Join(Left, table, abbrev, []string{jobIdCol}) - } - - if len(annotationFilters) > 0 { - normalFiltersToUse, err := qb.filtersForAnnotationTable(normalFilters) - if err != nil { - return nil, err - } - - for i := 0; i < len(annotationFilters); i++ { - table, err := qb.annotationFilterTable(annotationFilters[i], normalFiltersToUse) - if err != nil { - return nil, err - } - fromBuilder.Join( - Inner, - fmt.Sprintf("( %s )", table), - fmt.Sprintf("%s%d", userAnnotationLookupTableAbbrev, i), - []string{jobIdCol}) - } - } - - if activeJobSets { - fromBuilder.Join( - Inner, - activeJobSetsTable, - activeJobSetsTableAbbrev, - []string{queueCol, jobSetCol}, - ) - } - - return fromBuilder, nil -} - -func (qb *QueryBuilder) annotationFilterTable(annotationFilter *model.Filter, normalFilters []*model.Filter) (string, error) { - if !annotationFilter.IsAnnotation { - return "", errors.New("no annotation filter specified") - } - - queryFilters, err := qb.makeQueryFilters(normalFilters, util.StringListToSet([]string{userAnnotationLookupTable})) - if err != nil { - return "", err - } - whereSql, err := qb.queryFiltersToSql(queryFilters, false) - if err != nil { - return "", err - } - annotationFilterCondition, err := qb.annotationFilterCondition(annotationFilter) - if err != nil { - return "", err - } - if whereSql != "" { - whereSql = fmt.Sprintf("%s AND %s", whereSql, annotationFilterCondition) - } else { - whereSql = fmt.Sprintf("WHERE %s", annotationFilterCondition) - } - return fmt.Sprintf("SELECT %s FROM %s %s", jobIdCol, userAnnotationLookupTable, whereSql), nil -} - -func (qb *QueryBuilder) annotationGroupTable(key string, normalFilters []*model.Filter) (string, error) { - normalFiltersToUse, err := qb.filtersForAnnotationTable(normalFilters) - if err != nil { - return "", err - } - queryFilters, err := qb.makeQueryFilters(normalFiltersToUse, util.StringListToSet([]string{userAnnotationLookupTable})) - if err != nil { - return "", err - } - whereSql, err := qb.queryFiltersToSql(queryFilters, false) - if err != nil { - return "", err - } - keyEncoded, err := qb.valueForMatch(key, model.MatchExact) - if err != nil { - return "", err - } - annotationKeyCondition := fmt.Sprintf("key = %s", keyEncoded) - if whereSql != "" { - 
whereSql = fmt.Sprintf("%s AND %s", whereSql, annotationKeyCondition) - } else { - whereSql = fmt.Sprintf("WHERE %s", annotationKeyCondition) - } - return fmt.Sprintf("SELECT %s, %s FROM %s %s", jobIdCol, annotationValueCol, userAnnotationLookupTable, whereSql), nil -} - -// Only use filters on columns that are present in user_annotation_lookup table -func (qb *QueryBuilder) filtersForAnnotationTable(normalFilters []*model.Filter) ([]*model.Filter, error) { - var normalFiltersToUse []*model.Filter - for _, filter := range normalFilters { - column, err := qb.lookoutTables.ColumnFromField(filter.Field) - if err != nil { - return nil, err - } - tables, err := qb.lookoutTables.TablesForColumn(column) - if err != nil { - return nil, err - } - if _, ok := tables[userAnnotationLookupTable]; ok { - normalFiltersToUse = append(normalFiltersToUse, filter) - } - } - return normalFiltersToUse, nil -} - -func (qb *QueryBuilder) annotationFilterCondition(annotationFilter *model.Filter) (string, error) { - key, err := qb.valueForMatch(annotationFilter.Field, model.MatchExact) - if err != nil { - return "", err - } - if annotationFilter.Match == model.MatchExists { - return fmt.Sprintf("%s = %s", annotationKeyCol, key), nil - } - comparator, err := operatorForMatch(annotationFilter.Match) - if err != nil { - return "", err - } - value, err := qb.valueForMatch(annotationFilter.Value, annotationFilter.Match) - if err != nil { - return "", err - } - return fmt.Sprintf("%s = %s AND %s %s %s", annotationKeyCol, key, annotationValueCol, comparator, value), nil -} - -// Get abbreviation for highest precedence table out of a set of tables -func (qb *QueryBuilder) firstTableAbbrev(queryTables map[string]bool) (string, error) { - for _, table := range qb.lookoutTables.TablePrecedence() { - if _, ok := queryTables[table]; ok { - abbrev, err := qb.lookoutTables.TableAbbrev(table) - if err != nil { - return "", err - } - return abbrev, nil - } - } - return "", errors.New("no tables") -} - -// makeQueryFilters takes a list of external filters and a set of tables to perform the queries on, and returns the -// corresponding list of queryFilters which will be used to generate the WHERE clause for the query -func (qb *QueryBuilder) makeQueryFilters(filters []*model.Filter, queryTables map[string]bool) ([]*queryFilter, error) { - result := make([]*queryFilter, len(filters)) - for i, filter := range filters { - col, err := qb.lookoutTables.ColumnFromField(filter.Field) - if err != nil { - return nil, err - } - table, err := qb.highestPrecedenceTableForColumn(col, queryTables) - if err != nil { - return nil, err - } - abbrev, err := qb.lookoutTables.TableAbbrev(table) - if err != nil { - return nil, err - } - value := filter.Value - if col == stateCol { - value, err = parseValueForState(value) - if err != nil { - return nil, err - } - } - result[i] = &queryFilter{ - column: &queryColumn{ - name: col, - table: table, - abbrev: abbrev, - }, - value: value, - match: filter.Match, - } - } - return result, nil -} - func parseValueForState(value interface{}) (interface{}, error) { switch v := value.(type) { case string: @@ -837,51 +305,13 @@ func parseValueForState(value interface{}) (interface{}, error) { } } -// queryFiltersToSql converts list of queryFilters to WHERE clause -// useAbbrev denotes whether fields should be referred to with abbreviated table form or not -func (qb *QueryBuilder) queryFiltersToSql(filters []*queryFilter, useAbbrev bool) (string, error) { - if len(filters) == 0 { - return "", nil - } - var exprs 
[]string - for _, filter := range filters { - expr, err := qb.comparisonExpr(filter.value, filter.match, filter.column.abbrev, filter.column.name, useAbbrev) - if err != nil { - return "", err - } - exprs = append(exprs, expr) - } - return fmt.Sprintf("WHERE %s", strings.Join(exprs, " AND ")), nil -} - -// Given a value, a match, a table abbreviation and a column name, returns the corresponding comparison expression for -// use in a WHERE clause -func (qb *QueryBuilder) comparisonExpr(value interface{}, match, abbrev, colName string, useAbbrev bool) (string, error) { - comparator, err := operatorForMatch(match) - if err != nil { - return "", err - } - formattedValue, err := qb.valueForMatch(value, match) - if err != nil { - return "", err - } - if !useAbbrev { - return fmt.Sprintf( - "%s %s %s", - colName, comparator, formattedValue), nil - } - return fmt.Sprintf( - "%s.%s %s %s", - abbrev, colName, comparator, formattedValue), nil -} - -func (qb *QueryBuilder) makeWhereJsonb(filters []*model.Filter) (string, error) { +func (qb *QueryBuilder) makeWhere(filters []*model.Filter) (string, error) { if len(filters) == 0 { return "", nil } var clauses []string for _, filter := range filters { - clause, err := qb.makeWhereClauseJsonb(filter) + clause, err := qb.makeWhereClause(filter) if err != nil { return "", err } @@ -890,7 +320,7 @@ func (qb *QueryBuilder) makeWhereJsonb(filters []*model.Filter) (string, error) return fmt.Sprintf("WHERE %s", strings.Join(clauses, " AND ")), nil } -func (qb *QueryBuilder) makeWhereClauseJsonb(filter *model.Filter) (string, error) { +func (qb *QueryBuilder) makeWhereClause(filter *model.Filter) (string, error) { var column string if filter.IsAnnotation { switch filter.Match { @@ -914,7 +344,7 @@ func (qb *QueryBuilder) makeWhereClauseJsonb(filter *model.Filter) (string, erro placeholder := qb.recordValue(filter.Field) return fmt.Sprintf("%s.annotations ? 
%s", jobTableAbbrev, placeholder), nil default: - column = qb.annotationColumnJsonb(filter.Field) + column = qb.annotationColumn(filter.Field) } } else { var err error @@ -945,12 +375,12 @@ func (qb *QueryBuilder) makeWhereClauseJsonb(filter *model.Filter) (string, erro return fmt.Sprintf("%s.%s %s %s", jobTableAbbrev, column, operator, placeholder), nil } -func (qb *QueryBuilder) annotationColumnJsonb(key string) string { +func (qb *QueryBuilder) annotationColumn(key string) string { placeholder := qb.recordValue(key) return fmt.Sprintf("annotations->>%s", placeholder) } -func (qb *QueryBuilder) makeOrderByJsonb(order *model.Order) (string, error) { +func (qb *QueryBuilder) makeOrderBy(order *model.Order) (string, error) { if orderIsNull(order) { return "", nil } @@ -1024,88 +454,23 @@ func (qb *QueryBuilder) recordValue(value interface{}) string { return fmt.Sprintf("$%d", len(qb.args)) } -// makeQueryOrder takes an external order and a set of tables to perform the queries on, and returns the -// corresponding queryOrder which will be used to generate the ORDER BY clause for the query -func (qb *QueryBuilder) makeQueryOrder(order *model.Order, queryTables map[string]bool) (*queryOrder, error) { - if orderIsNull(order) { - return nil, nil - } - col, err := qb.lookoutTables.ColumnFromField(order.Field) - if err != nil { - return nil, err - } - table, err := qb.highestPrecedenceTableForColumn(col, queryTables) - if err != nil { - return nil, err - } - abbrev, err := qb.lookoutTables.TableAbbrev(table) - if err != nil { - return nil, err - } - return &queryOrder{ - column: &queryColumn{ - name: col, - table: table, - abbrev: abbrev, - }, - direction: order.Direction, - }, nil -} - -// queryOrderToSql converts list of queryFilters to WHERE clause -func (qb *QueryBuilder) queryOrderToSql(order *queryOrder) string { - if order == nil { - return "" - } - return fmt.Sprintf("ORDER BY %s.%s %s", order.column.abbrev, order.column.name, order.direction) -} - -// getGroupByQueryCol finds the groupedField's corresponding column and best table to group by on -func (qb *QueryBuilder) getGroupByQueryCol(field string, queryTables map[string]bool) (*queryColumn, error) { - col, err := qb.lookoutTables.ColumnFromField(field) - if err != nil { - return nil, err - } - return qb.getQueryColumn(col, queryTables) -} - -// Gets the highest precedence table for a given column, among the tables that have already been selected for the query -func (qb *QueryBuilder) highestPrecedenceTableForColumn(col string, queryTables map[string]bool) (string, error) { - colTables, err := qb.lookoutTables.TablesForColumn(col) - if err != nil { - return "", err - } - var selectedTable string - for _, table := range qb.lookoutTables.TablePrecedence() { - _, isInQueryTables := queryTables[table] - _, isInColTables := colTables[table] - if isInQueryTables && isInColTables { - selectedTable = table - break - } - } - if selectedTable == "" { - return "", errors.Errorf("no table found for column %s", col) - } - return selectedTable, nil -} - -func (qb *QueryBuilder) getQueryAggregators(aggregates []string, filters []*model.Filter, queryTables map[string]bool) ([]QueryAggregator, error) { +func (qb *QueryBuilder) getQueryAggregators(aggregates []string, filters []*model.Filter) ([]QueryAggregator, error) { var queryAggregators []QueryAggregator for _, aggregate := range aggregates { col, err := qb.lookoutTables.ColumnFromField(aggregate) if err != nil { return nil, err } - qc, err := qb.getQueryColumn(col, queryTables) - if err != nil { - 
return nil, err + aggregateColumn := &queryColumn{ + name: col, + table: jobTable, + abbrev: jobTableAbbrev, } aggregateType, err := qb.lookoutTables.GroupAggregateForCol(col) if err != nil { return nil, err } - newQueryAggregators, err := GetAggregatorsForColumn(qc, aggregateType, filters) + newQueryAggregators, err := GetAggregatorsForColumn(aggregateColumn, aggregateType, filters) if err != nil { return nil, err } @@ -1140,22 +505,6 @@ func (qb *QueryBuilder) groupByOrderSql(order *model.Order) (string, error) { return fmt.Sprintf("ORDER BY %s %s", col, order.Direction), nil } -func (qb *QueryBuilder) getQueryColumn(col string, queryTables map[string]bool) (*queryColumn, error) { - table, err := qb.highestPrecedenceTableForColumn(col, queryTables) - if err != nil { - return nil, err - } - abbrev, err := qb.lookoutTables.TableAbbrev(table) - if err != nil { - return nil, err - } - return &queryColumn{ - name: col, - table: table, - abbrev: abbrev, - }, nil -} - func limitOffsetSql(skip, take int) string { // Asking for zero rows is not useful to us, so we take a value of zero to // mean "no limit"; this is consistent with go-swagger, which uses zero as diff --git a/internal/lookoutv2/repository/querybuilder_test.go b/internal/lookoutv2/repository/querybuilder_test.go deleted file mode 100644 index d4496d71367..00000000000 --- a/internal/lookoutv2/repository/querybuilder_test.go +++ /dev/null @@ -1,537 +0,0 @@ -package repository - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/armadaproject/armada/internal/common/database/lookout" - "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/internal/lookoutv2/model" -) - -var testFilters = []*model.Filter{ - { - Field: "queue", - Match: "exact", - Value: "test\\queue", - }, - { - Field: "owner", - Match: "startsWith", - Value: "anon\\one", - }, - { - Field: "1234", - Match: "exact", - Value: "abcd", - IsAnnotation: true, - }, - { - Field: "5678", - Match: "startsWith", - Value: "efgh", - IsAnnotation: true, - }, -} - -func TestIntersection(t *testing.T) { - t.Run("no items", func(t *testing.T) { - result := intersection([]map[string]bool{}) - assert.Equal(t, map[string]bool{}, result) - }) - t.Run("single item in intersection", func(t *testing.T) { - result := intersection([]map[string]bool{ - util.StringListToSet([]string{"a", "b"}), - util.StringListToSet([]string{"b", "c"}), - }) - assert.Equal(t, map[string]bool{"b": true}, result) - }) - t.Run("no items in intersection", func(t *testing.T) { - result := intersection([]map[string]bool{ - util.StringListToSet([]string{"a", "b"}), - util.StringListToSet([]string{"c", "d"}), - }) - assert.Equal(t, map[string]bool{}, result) - }) - t.Run("multiple items in intersection", func(t *testing.T) { - result := intersection([]map[string]bool{ - util.StringListToSet([]string{"a", "b"}), - util.StringListToSet([]string{"a", "b", "d"}), - util.StringListToSet([]string{"a", "b", "d", "x", "y"}), - }) - assert.Equal(t, map[string]bool{"a": true, "b": true}, result) - }) -} - -func TestQueryBuilder_DetermineTablesForQuery(t *testing.T) { - qb := QueryBuilder{lookoutTables: NewTables()} - - t.Run("only use job table if no filters", func(t *testing.T) { - tables, err := qb.determineTablesForQuery([]map[string]bool{}) - assert.NoError(t, err) - assert.Equal(t, util.StringListToSet([]string{jobTable}), tables) - }) - - t.Run("only use job table if only querying for field in it", func(t *testing.T) { - tables, err := 
qb.determineTablesForQuery([]map[string]bool{ - util.StringListToSet([]string{jobTable}), - }) - assert.NoError(t, err) - assert.Equal(t, util.StringListToSet([]string{jobTable}), tables) - }) - - t.Run("use highest precedence table if querying for field in multiple tables", func(t *testing.T) { - tables, err := qb.determineTablesForQuery([]map[string]bool{ - util.StringListToSet([]string{jobTable, jobRunTable, userAnnotationLookupTable}), - }) - assert.NoError(t, err) - assert.Equal(t, util.StringListToSet([]string{jobTable}), tables) - }) - - t.Run("only use user_annotation_lookup if querying by queue and annotation", func(t *testing.T) { - tables, err := qb.determineTablesForQuery([]map[string]bool{ - util.StringListToSet([]string{jobTable, jobRunTable, userAnnotationLookupTable}), - util.StringListToSet([]string{userAnnotationLookupTable}), - }) - assert.NoError(t, err) - assert.Equal(t, util.StringListToSet([]string{userAnnotationLookupTable}), tables) - }) - - t.Run("return multiple tables if there is no overlap", func(t *testing.T) { - tables, err := qb.determineTablesForQuery([]map[string]bool{ - util.StringListToSet([]string{jobTable, userAnnotationLookupTable}), - util.StringListToSet([]string{jobRunTable}), - }) - assert.NoError(t, err) - assert.Equal(t, util.StringListToSet([]string{jobTable, jobRunTable}), tables) - }) - - t.Run("many fields with no overlap", func(t *testing.T) { - tables, err := qb.determineTablesForQuery([]map[string]bool{ - util.StringListToSet([]string{jobTable, jobRunTable}), - util.StringListToSet([]string{jobRunTable}), - util.StringListToSet([]string{jobRunTable}), - util.StringListToSet([]string{userAnnotationLookupTable}), - }) - assert.NoError(t, err) - assert.Equal(t, util.StringListToSet([]string{jobRunTable, userAnnotationLookupTable}), tables) - }) -} - -func TestQueryBuilder_CreateTempTable(t *testing.T) { - query, tempTableName := NewQueryBuilder(NewTables()).CreateTempTable() - assert.NotEmpty(t, tempTableName) - assert.Equal(t, splitByWhitespace( - fmt.Sprintf("CREATE TEMPORARY TABLE %s ( job_id varchar(32) NOT NULL ) ON COMMIT DROP", tempTableName)), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{}, query.Args) -} - -func TestQueryBuilder_InsertIntoTempTableEmpty(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).InsertIntoTempTable( - "test_table", - []*model.Filter{}, - false, - nil, - 0, - 10, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - INSERT INTO test_table (job_id) - SELECT j.job_id - FROM job AS j - LIMIT 10 OFFSET 0 - ON CONFLICT DO NOTHING - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}(nil), query.Args) -} - -func TestQueryBuilder_InsertIntoTempTable(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).InsertIntoTempTable( - "test_table", - testFilters, - false, - &model.Order{ - Direction: "ASC", - Field: "jobId", - }, - 0, - 10, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - INSERT INTO test_table (job_id) - SELECT j.job_id FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - WHERE j.queue = $7 AND j.owner LIKE $8 - ORDER BY j.job_id ASC - LIMIT 10 OFFSET 0 - ON CONFLICT DO NOTHING - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{"test\\queue", "1234", 
"abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%"}, query.Args) -} - -func TestQueryBuilder_InsertIntoTempTable_ActiveJobSets(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).InsertIntoTempTable( - "test_table", - testFilters, - true, - &model.Order{ - Direction: "ASC", - Field: "jobId", - }, - 0, - 10, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - INSERT INTO test_table (job_id) - SELECT j.job_id FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - INNER JOIN ( - SELECT DISTINCT queue, jobset - FROM job - WHERE state IN (1, 2, 3, 8) - ) AS active_job_sets ON j.queue = active_job_sets.queue AND j.jobset = active_job_sets.jobset - WHERE j.queue = $7 AND j.owner LIKE $8 - ORDER BY j.job_id ASC - LIMIT 10 OFFSET 0 - ON CONFLICT DO NOTHING - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{"test\\queue", "1234", "abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%"}, query.Args) -} - -func TestQueryBuilder_GroupByEmpty(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).GroupBy( - []*model.Filter{}, - false, - nil, - &model.GroupedField{ - Field: "jobSet", - }, - []string{}, - 0, - 10, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT j.jobset, COUNT(*) AS count - FROM job AS j - GROUP BY j.jobset - LIMIT 10 OFFSET 0 - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}(nil), query.Args) -} - -func TestQueryBuilder_GroupBy(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).GroupBy( - testFilters, - false, - &model.Order{ - Direction: "DESC", - Field: "count", - }, - &model.GroupedField{ - Field: "jobSet", - }, - []string{}, - 0, - 10, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT j.jobset, COUNT(*) AS count - FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - WHERE j.queue = $7 AND j.owner LIKE $8 - GROUP BY j.jobset - ORDER BY count DESC - LIMIT 10 OFFSET 0 - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{"test\\queue", "1234", "abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%"}, query.Args) -} - -func TestQueryBuilder_GroupBySingleAggregate(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).GroupBy( - testFilters, - false, - &model.Order{ - Direction: "ASC", - Field: "submitted", - }, - &model.GroupedField{ - Field: "jobSet", - }, - []string{ - "submitted", - }, - 20, - 100, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT j.jobset, COUNT(*) AS count, MIN(j.submitted) AS submitted - FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - WHERE j.queue = $7 AND j.owner LIKE $8 - GROUP BY j.jobset - ORDER BY submitted ASC - LIMIT 100 OFFSET 20 - `), - splitByWhitespace(query.Sql)) - 
assert.Equal(t, []interface{}{"test\\queue", "1234", "abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%"}, query.Args) -} - -func TestQueryBuilder_GroupByMultipleAggregates(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).GroupBy( - testFilters, - false, - &model.Order{ - Direction: "DESC", - Field: "lastTransitionTime", - }, - &model.GroupedField{ - Field: "jobSet", - }, - []string{ - "lastTransitionTime", - "submitted", - }, - 20, - 100, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT j.jobset, COUNT(*) AS count, AVG(j.last_transition_time_seconds) AS last_transition_time_seconds, MIN(j.submitted) AS submitted - FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - WHERE j.queue = $7 AND j.owner LIKE $8 - GROUP BY j.jobset - ORDER BY last_transition_time_seconds DESC - LIMIT 100 OFFSET 20 - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{"test\\queue", "1234", "abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%"}, query.Args) -} - -func TestQueryBuilder_GroupByStateAggregates(t *testing.T) { - stateFilter := &model.Filter{ - Field: "state", - Match: model.MatchAnyOf, - Value: []string{ - string(lookout.JobQueued), - string(lookout.JobLeased), - string(lookout.JobPending), - string(lookout.JobRunning), - }, - } - query, err := NewQueryBuilder(NewTables()).GroupBy( - append(testFilters, stateFilter), - false, - &model.Order{ - Direction: "DESC", - Field: "lastTransitionTime", - }, - &model.GroupedField{ - Field: "jobSet", - }, - []string{ - "lastTransitionTime", - "submitted", - "state", - }, - 20, - 100, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT j.jobset, - COUNT(*) AS count, - AVG(j.last_transition_time_seconds) AS last_transition_time_seconds, - MIN(j.submitted) AS submitted, - SUM(CASE WHEN j.state = 1 THEN 1 ELSE 0 END) AS state_QUEUED, - SUM(CASE WHEN j.state = 8 THEN 1 ELSE 0 END) AS state_LEASED, - SUM(CASE WHEN j.state = 2 THEN 1 ELSE 0 END) AS state_PENDING, - SUM(CASE WHEN j.state = 3 THEN 1 ELSE 0 END) AS state_RUNNING - FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - WHERE j.queue = $7 AND j.owner LIKE $8 AND j.state IN ($9, $10, $11, $12) - GROUP BY j.jobset - ORDER BY last_transition_time_seconds DESC - LIMIT 100 OFFSET 20 - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{"test\\queue", "1234", "abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%", 1, 8, 2, 3}, query.Args) -} - -func TestQueryBuilder_GroupByAnnotationMultipleAggregates(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).GroupBy( - testFilters, - false, - &model.Order{ - Direction: "DESC", - Field: "lastTransitionTime", - }, - &model.GroupedField{ - Field: "custom_annotation", - IsAnnotation: true, - }, - []string{ - "lastTransitionTime", - "submitted", - }, - 20, - 100, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT ual_group.value, COUNT(*) AS count, AVG(j.last_transition_time_seconds) AS 
last_transition_time_seconds, MIN(j.submitted) AS submitted - FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - INNER JOIN ( - SELECT job_id, value - FROM user_annotation_lookup - WHERE queue = $7 AND key = $8 - ) AS ual_group ON j.job_id = ual_group.job_id - WHERE j.queue = $9 AND j.owner LIKE $10 - GROUP BY ual_group.value - ORDER BY last_transition_time_seconds DESC - LIMIT 100 OFFSET 20 - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{ - "test\\queue", - "1234", - "abcd", - "test\\queue", - "5678", - "efgh%", - "test\\queue", - "custom_annotation", - "test\\queue", - "anon\\\\one%", - }, query.Args) -} - -func TestQueryBuilder_GroupBy_ActiveJobSets(t *testing.T) { - query, err := NewQueryBuilder(NewTables()).GroupBy( - testFilters, - true, - &model.Order{ - Direction: "DESC", - Field: "count", - }, - &model.GroupedField{ - Field: "jobSet", - }, - []string{}, - 0, - 10, - ) - assert.NoError(t, err) - assert.Equal(t, splitByWhitespace(` - SELECT j.jobset, COUNT(*) AS count - FROM job AS j - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $1 AND key = $2 AND value = $3 - ) AS ual0 ON j.job_id = ual0.job_id - INNER JOIN ( - SELECT job_id - FROM user_annotation_lookup - WHERE queue = $4 AND key = $5 AND value LIKE $6 - ) AS ual1 ON j.job_id = ual1.job_id - INNER JOIN ( - SELECT DISTINCT queue, jobset - FROM job - WHERE state IN (1, 2, 3, 8) - ) AS active_job_sets ON j.queue = active_job_sets.queue AND j.jobset = active_job_sets.jobset - WHERE j.queue = $7 AND j.owner LIKE $8 - GROUP BY j.jobset - ORDER BY count DESC - LIMIT 10 OFFSET 0 - `), - splitByWhitespace(query.Sql)) - assert.Equal(t, []interface{}{"test\\queue", "1234", "abcd", "test\\queue", "5678", "efgh%", "test\\queue", "anon\\\\one%"}, query.Args) -} - -func splitByWhitespace(s string) []string { - return strings.FieldsFunc(s, splitFn) -} - -func splitFn(r rune) bool { - return r == ' ' || r == '\n' || r == '\t' -} diff --git a/internal/lookoutv2/repository/tables.go b/internal/lookoutv2/repository/tables.go index e4d4eaf2397..1ca9207b336 100644 --- a/internal/lookoutv2/repository/tables.go +++ b/internal/lookoutv2/repository/tables.go @@ -12,13 +12,11 @@ const ( submittedField = "submitted" lastTransitionTimeField = "lastTransitionTime" - jobTable = "job" - jobRunTable = "job_run" - userAnnotationLookupTable = "user_annotation_lookup" + jobTable = "job" + jobRunTable = "job_run" - jobTableAbbrev = "j" - jobRunTableAbbrev = "jr" - userAnnotationLookupTableAbbrev = "ual" + jobTableAbbrev = "j" + jobRunTableAbbrev = "jr" jobIdCol = "job_id" queueCol = "queue" @@ -34,9 +32,6 @@ const ( submittedCol = "submitted" lastTransitionTimeCol = "last_transition_time_seconds" priorityClassCol = "priority_class" - - annotationKeyCol = "key" - annotationValueCol = "value" ) type AggregateType int @@ -52,17 +47,12 @@ const ( type LookoutTables struct { // field name -> column name fieldColumnMap map[string]string - // column name -> set of tables with that column - // (the same column could be in multiple tables, either as a foreign key or for denormalization) - columnsTableMap map[string]map[string]bool // set of column names that can be ordered orderableColumns map[string]bool // column name -> set of supported matches for column 
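// Illustrative sketch (not part of the diff): with user_annotation_lookup and
// the table-precedence logic removed, LookoutTables now only maps API-level
// field names to job/job_run columns and validates filter match types. The
// field and match used below are examples; the snippet assumes it lives inside
// the Armada module (the repository package is internal).
package main

import (
	"fmt"
	"log"

	"github.com/armadaproject/armada/internal/lookoutv2/model"
	"github.com/armadaproject/armada/internal/lookoutv2/repository"
)

func main() {
	tables := repository.NewTables()

	// Resolve the API-level field name "jobSet" to its database column.
	col, err := tables.ColumnFromField("jobSet")
	if err != nil {
		log.Fatal(err)
	}

	// Check whether a startsWith filter is supported on that column.
	fmt.Println(col, tables.SupportsMatch(col, model.MatchStartsWith))
}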
filterableColumns map[string]map[string]bool // table name -> abbreviated table name tableAbbrevs map[string]string - // order of precedence for tables - which tables to prioritize querying from - tablePrecedence []string // columns that can be grouped by groupableColumns map[string]bool // map from column to aggregate that can be performed on it @@ -87,22 +77,6 @@ func NewTables() *LookoutTables { "lastTransitionTime": lastTransitionTimeCol, "priorityClass": priorityClassCol, }, - columnsTableMap: map[string]map[string]bool{ - jobIdCol: util.StringListToSet([]string{jobTable, jobRunTable, userAnnotationLookupTable}), - queueCol: util.StringListToSet([]string{jobTable, userAnnotationLookupTable}), - jobSetCol: util.StringListToSet([]string{jobTable, userAnnotationLookupTable}), - ownerCol: util.StringListToSet([]string{jobTable}), - namespaceCol: util.StringListToSet([]string{jobTable}), - stateCol: util.StringListToSet([]string{jobTable}), - cpuCol: util.StringListToSet([]string{jobTable}), - memoryCol: util.StringListToSet([]string{jobTable}), - ephemeralStorageCol: util.StringListToSet([]string{jobTable}), - gpuCol: util.StringListToSet([]string{jobTable}), - priorityCol: util.StringListToSet([]string{jobTable}), - submittedCol: util.StringListToSet([]string{jobTable}), - lastTransitionTimeCol: util.StringListToSet([]string{jobTable}), - priorityClassCol: util.StringListToSet([]string{jobTable}), - }, orderableColumns: util.StringListToSet([]string{ jobIdCol, jobSetCol, @@ -124,14 +98,8 @@ func NewTables() *LookoutTables { priorityClassCol: util.StringListToSet([]string{model.MatchExact, model.MatchStartsWith, model.MatchContains}), }, tableAbbrevs: map[string]string{ - jobTable: jobTableAbbrev, - jobRunTable: jobRunTableAbbrev, - userAnnotationLookupTable: userAnnotationLookupTableAbbrev, - }, - tablePrecedence: []string{ - jobTable, - jobRunTable, - userAnnotationLookupTable, + jobTable: jobTableAbbrev, + jobRunTable: jobRunTableAbbrev, }, groupableColumns: util.StringListToSet([]string{ queueCol, @@ -174,14 +142,6 @@ func (c *LookoutTables) SupportsMatch(col, match string) bool { return isSupported } -func (c *LookoutTables) TablesForColumn(col string) (map[string]bool, error) { - tables, ok := c.columnsTableMap[col] - if !ok { - return nil, errors.Errorf("cannot find table for column %s", col) - } - return tables, nil -} - func (c *LookoutTables) TableAbbrev(table string) (string, error) { abbrev, ok := c.tableAbbrevs[table] if !ok { @@ -190,10 +150,6 @@ func (c *LookoutTables) TableAbbrev(table string) (string, error) { return abbrev, nil } -func (c *LookoutTables) TablePrecedence() []string { - return c.tablePrecedence -} - func (c *LookoutTables) IsGroupable(col string) bool { _, ok := c.groupableColumns[col] return ok diff --git a/internal/lookoutv2/repository/util.go b/internal/lookoutv2/repository/util.go index 8841c66e4d4..9ca90d6271f 100644 --- a/internal/lookoutv2/repository/util.go +++ b/internal/lookoutv2/repository/util.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/clock" "k8s.io/utils/pointer" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -37,6 +38,7 @@ type JobSimulator struct { events []*armadaevents.EventSequence_Event converter *instructions.InstructionConverter store *lookoutdb.LookoutDb + clock clock.Clock } type JobOptions struct { @@ -66,6 +68,15 @@ func NewJobSimulator(converter *instructions.InstructionConverter, store *lookou return 
&JobSimulator{ converter: converter, store: store, + clock: clock.RealClock{}, + } +} + +func NewJobSimulatorWithClock(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, clk clock.Clock) *JobSimulator { + return &JobSimulator{ + converter: converter, + store: store, + clock: clk, } } @@ -170,14 +181,16 @@ func (js *JobSimulator) Submit(queue, jobSet, owner, namespace string, timestamp return js } -func (js *JobSimulator) Lease(runId string, timestamp time.Time) *JobSimulator { +func (js *JobSimulator) Lease(runId string, cluster string, node string, timestamp time.Time) *JobSimulator { ts := timestampOrNow(timestamp) leasedEvent := &armadaevents.EventSequence_Event{ Created: &ts, Event: &armadaevents.EventSequence_Event_JobRunLeased{ JobRunLeased: &armadaevents.JobRunLeased{ - RunId: armadaevents.ProtoUuidFromUuid(uuid.MustParse(runId)), - JobId: js.jobId, + RunId: armadaevents.ProtoUuidFromUuid(uuid.MustParse(runId)), + JobId: js.jobId, + ExecutorId: cluster, + NodeId: node, }, }, } @@ -186,9 +199,13 @@ func (js *JobSimulator) Lease(runId string, timestamp time.Time) *JobSimulator { js.job.LastActiveRunId = &runId js.job.LastTransitionTime = ts js.job.State = string(lookout.JobLeased) - updateRun(js.job, &runPatch{ + js.job.Cluster = cluster + js.job.Node = &node + js.updateRun(js.job, &runPatch{ runId: runId, jobRunState: lookout.JobRunLeased, + cluster: &cluster, + node: &node, leased: &ts, }) return js @@ -232,10 +249,7 @@ func (js *JobSimulator) Pending(runId string, cluster string, timestamp time.Tim jobRunState: lookout.JobRunPending, pending: &ts, } - if js.converter.IsLegacy() { - rp.leased = &ts - } - updateRun(js.job, rp) + js.updateRun(js.job, rp) return js } @@ -266,7 +280,7 @@ func (js *JobSimulator) Running(runId string, node string, timestamp time.Time) js.job.LastTransitionTime = ts js.job.State = string(lookout.JobRunning) js.job.Node = &node - updateRun(js.job, &runPatch{ + js.updateRun(js.job, &runPatch{ runId: runId, jobRunState: lookout.JobRunRunning, node: &node, @@ -289,7 +303,7 @@ func (js *JobSimulator) RunSucceeded(runId string, timestamp time.Time) *JobSimu js.events = append(js.events, runSucceeded) js.job.LastActiveRunId = &runId - updateRun(js.job, &runPatch{ + js.updateRun(js.job, &runPatch{ runId: runId, exitCode: pointer.Int32(0), finished: &ts, @@ -338,7 +352,7 @@ func (js *JobSimulator) LeaseReturned(runId string, message string, timestamp ti } js.events = append(js.events, leaseReturned) - updateRun(js.job, &runPatch{ + js.updateRun(js.job, &runPatch{ runId: runId, finished: &ts, jobRunState: lookout.JobRunLeaseReturned, @@ -383,7 +397,7 @@ func (js *JobSimulator) Reprioritized(newPriority uint32, timestamp time.Time) * return js } -func (js *JobSimulator) RunFailed(runId string, node string, exitCode int32, message string, timestamp time.Time) *JobSimulator { +func (js *JobSimulator) RunFailed(runId string, node string, exitCode int32, message string, debug string, timestamp time.Time) *JobSimulator { ts := timestampOrNow(timestamp) runFailed := &armadaevents.EventSequence_Event{ Created: &ts, @@ -396,8 +410,9 @@ func (js *JobSimulator) RunFailed(runId string, node string, exitCode int32, mes Terminal: true, Reason: &armadaevents.Error_PodError{ PodError: &armadaevents.PodError{ - Message: message, - NodeName: node, + Message: message, + DebugMessage: debug, + NodeName: node, ContainerErrors: []*armadaevents.ContainerError{ {ExitCode: exitCode}, }, @@ -411,7 +426,7 @@ func (js *JobSimulator) RunFailed(runId string, node string, 
exitCode int32, mes js.events = append(js.events, runFailed) js.job.LastActiveRunId = &runId - updateRun(js.job, &runPatch{ + js.updateRun(js.job, &runPatch{ runId: runId, exitCode: &exitCode, finished: &ts, @@ -454,23 +469,34 @@ func (js *JobSimulator) Failed(node string, exitCode int32, message string, time func (js *JobSimulator) Preempted(timestamp time.Time) *JobSimulator { ts := timestampOrNow(timestamp) - jobIdProto, err := armadaevents.ProtoUuidFromUlidString(util.NewULID()) - if err != nil { - log.WithError(err).Errorf("Could not convert job ID to UUID: %s", util.NewULID()) + + preemptedJob := &armadaevents.EventSequence_Event{ + Created: &ts, + Event: &armadaevents.EventSequence_Event_JobErrors{ + JobErrors: &armadaevents.JobErrors{ + JobId: js.jobId, + Errors: []*armadaevents.Error{ + { + Terminal: true, + Reason: &armadaevents.Error_JobRunPreemptedError{ + JobRunPreemptedError: &armadaevents.JobRunPreemptedError{}, + }, + }, + }, + }, + }, } - preempted := &armadaevents.EventSequence_Event{ + preemptedRun := &armadaevents.EventSequence_Event{ Created: &ts, Event: &armadaevents.EventSequence_Event_JobRunPreempted{ JobRunPreempted: &armadaevents.JobRunPreempted{ - PreemptedJobId: js.jobId, - PreemptiveJobId: jobIdProto, - PreemptedRunId: armadaevents.ProtoUuidFromUuid(uuid.MustParse(uuid.NewString())), - PreemptiveRunId: armadaevents.ProtoUuidFromUuid(uuid.MustParse(uuid.NewString())), + PreemptedJobId: js.jobId, + PreemptedRunId: armadaevents.ProtoUuidFromUuid(uuid.MustParse(uuid.NewString())), }, }, } - js.events = append(js.events, preempted) + js.events = append(js.events, preemptedJob, preemptedRun) js.job.LastTransitionTime = ts js.job.State = string(lookout.JobPreempted) @@ -504,7 +530,7 @@ func (js *JobSimulator) RunTerminated(runId string, cluster string, node string, } js.events = append(js.events, terminated) - updateRun(js.job, &runPatch{ + js.updateRun(js.job, &runPatch{ runId: runId, cluster: &cluster, finished: &ts, @@ -541,7 +567,7 @@ func (js *JobSimulator) RunUnschedulable(runId string, cluster string, node stri } js.events = append(js.events, runUnschedulable) - updateRun(js.job, &runPatch{ + js.updateRun(js.job, &runPatch{ runId: runId, cluster: &cluster, finished: &ts, @@ -551,14 +577,14 @@ func (js *JobSimulator) RunUnschedulable(runId string, cluster string, node stri return js } -func (js *JobSimulator) LeaseExpired(timestamp time.Time) *JobSimulator { +func (js *JobSimulator) LeaseExpired(runId string, timestamp time.Time, _ clock.Clock) *JobSimulator { ts := timestampOrNow(timestamp) leaseReturned := &armadaevents.EventSequence_Event{ Created: &ts, Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ JobId: js.jobId, - RunId: eventutil.LegacyJobRunId(), + RunId: armadaevents.ProtoUuidFromUuid(uuid.MustParse(runId)), Errors: []*armadaevents.Error{ { Terminal: true, @@ -572,8 +598,8 @@ func (js *JobSimulator) LeaseExpired(timestamp time.Time) *JobSimulator { } js.events = append(js.events, leaseReturned) - updateRun(js.job, &runPatch{ - runId: eventutil.LEGACY_RUN_ID, + js.updateRun(js.job, &runPatch{ + runId: runId, finished: &ts, jobRunState: lookout.JobRunLeaseExpired, }) @@ -614,13 +640,14 @@ func timestampOrNow(timestamp time.Time) time.Time { return timestamp } -func updateRun(job *model.Job, patch *runPatch) { +func (js *JobSimulator) updateRun(job *model.Job, patch *runPatch) { if patch.exitCode != nil { job.ExitCode = patch.exitCode } for _, run := range job.Runs { if run.RunId == patch.runId { 
patchRun(run, patch) + job.RuntimeSeconds = calculateJobRuntime(run.Started, run.Finished, js.clock) return } } @@ -639,6 +666,7 @@ func updateRun(job *model.Job, patch *runPatch) { RunId: patch.runId, Started: model.NewPostgreSQLTime(patch.started), }) + job.RuntimeSeconds = calculateJobRuntime(model.NewPostgreSQLTime(patch.started), model.NewPostgreSQLTime(patch.finished), js.clock) } func patchRun(run *model.Run, patch *runPatch) { diff --git a/internal/lookoutv2/schema/migrations/007_queue.sql b/internal/lookoutv2/schema/migrations/007_queue.sql new file mode 100644 index 00000000000..14de9f62e8e --- /dev/null +++ b/internal/lookoutv2/schema/migrations/007_queue.sql @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS queue +( + name text NOT NULL PRIMARY KEY, + definition bytea NOT NULL +) diff --git a/internal/lookoutv2/schema/migrations/008_run_debug_column.sql b/internal/lookoutv2/schema/migrations/008_run_debug_column.sql new file mode 100644 index 00000000000..cba7eaec2ba --- /dev/null +++ b/internal/lookoutv2/schema/migrations/008_run_debug_column.sql @@ -0,0 +1 @@ +ALTER TABLE job_run ADD COLUMN debug bytea NULL; diff --git a/internal/lookoutv2/schema/migrations/009_job_deduplication.sql b/internal/lookoutv2/schema/migrations/009_job_deduplication.sql new file mode 100644 index 00000000000..eb99e84e7e2 --- /dev/null +++ b/internal/lookoutv2/schema/migrations/009_job_deduplication.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS job_deduplication +( + deduplication_id text NOT NULL PRIMARY KEY, + job_id text NOT NULL, + inserted TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX idx_job_deduplication_inserted ON job_deduplication (inserted); diff --git a/internal/lookoutv2/schema/migrations/010_drop_unused_table_and_indexes.sql b/internal/lookoutv2/schema/migrations/010_drop_unused_table_and_indexes.sql new file mode 100644 index 00000000000..40157d0de36 --- /dev/null +++ b/internal/lookoutv2/schema/migrations/010_drop_unused_table_and_indexes.sql @@ -0,0 +1,5 @@ +/* Dropping table drops associated indexes, rules, triggers */ +DROP TABLE IF EXISTS user_annotation_lookup; + +DROP INDEX IF EXISTS idx_job_jobset_last_transition_time_seconds; +DROP INDEX IF EXISTS idx_job_queue_jobset_last_transition_time_seconds; diff --git a/internal/lookoutv2/swagger.yaml b/internal/lookoutv2/swagger.yaml index 39024f5737d..c2bf25f59ad 100644 --- a/internal/lookoutv2/swagger.yaml +++ b/internal/lookoutv2/swagger.yaml @@ -25,6 +25,8 @@ definitions: - duplicate - annotations - runs + - cluster + - RuntimeSeconds properties: jobId: type: string @@ -113,6 +115,20 @@ definitions: cancelReason: type: string x-nullable: true + node: + type: string + x-nullable: true + cluster: + type: string + x-nullable: false + exitCode: + type: integer + format: int32 + x-nullable: true + RuntimeSeconds: + type: integer + format: int32 + x-nullable: false run: type: object required: @@ -163,6 +179,7 @@ definitions: - RUN_LEASE_EXPIRED - RUN_MAX_RUNS_EXCEEDED - RUN_LEASED + - RUN_CANCELLED x-nullable: false exitCode: type: integer @@ -400,6 +417,44 @@ paths: schema: $ref: "#/definitions/error" + /api/v1/jobRunDebugMessage: + post: + operationId: getJobRunDebugMessage + consumes: + - application/json + parameters: + - name: getJobRunDebugMessageRequest + required: true + in: body + schema: + type: object + required: + - runId + properties: + runId: + type: string + x-nullable: false + produces: + - application/json + responses: + 200: + description: Returns debug message for specific job run (if present) + schema: 
+ type: object + properties: + errorString: + type: string + description: Debug message for individual job run + x-nullable: false + 400: + description: Error response + schema: + $ref: "#/definitions/error" + default: + description: Error response + schema: + $ref: "#/definitions/error" + /api/v1/jobGroups: post: operationId: groupJobs diff --git a/internal/pulsartest/app.go b/internal/pulsartest/app.go deleted file mode 100644 index 93b79259bfd..00000000000 --- a/internal/pulsartest/app.go +++ /dev/null @@ -1,64 +0,0 @@ -package pulsartest - -import ( - "fmt" - - "github.com/apache/pulsar-client-go/pulsar" - "github.com/google/uuid" - "github.com/pkg/errors" - - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/common/pulsarutils" -) - -type App struct { - Producer pulsar.Producer - Reader pulsar.Reader -} - -type Params struct { - Pulsar configuration.PulsarConfig -} - -func New(params Params, cmdType string) (*App, error) { - serverId := uuid.New() - - pulsarClient, err := pulsarutils.NewPulsarClient(¶ms.Pulsar) - if err != nil { - return nil, err - } - - var producer pulsar.Producer - var reader pulsar.Reader - - if cmdType == "submit" { - producerName := fmt.Sprintf("pulsartest-%s", serverId) - producer, err = pulsarClient.CreateProducer(pulsar.ProducerOptions{ - Name: producerName, - Topic: params.Pulsar.JobsetEventsTopic, - }) - - if err != nil { - return nil, errors.Wrapf(err, "error creating pulsar producer %s", producerName) - } - - } else if cmdType == "watch" { - reader, err = pulsarClient.CreateReader(pulsar.ReaderOptions{ - Topic: params.Pulsar.JobsetEventsTopic, - StartMessageID: pulsar.EarliestMessageID(), - }) - - if err != nil { - return nil, errors.Wrapf(err, "error creating pulsar reader") - } - - } else { - return nil, errors.New("cmdType must be either 'submit' or 'watch'") - } - - app := &App{ - Producer: producer, - Reader: reader, - } - return app, nil -} diff --git a/internal/pulsartest/build/build.go b/internal/pulsartest/build/build.go deleted file mode 100644 index 762784ede2b..00000000000 --- a/internal/pulsartest/build/build.go +++ /dev/null @@ -1,9 +0,0 @@ -package build - -var BuildTime string - -var GitCommit string - -var ReleaseVersion string - -var GoVersion string diff --git a/internal/pulsartest/submit.go b/internal/pulsartest/submit.go deleted file mode 100644 index 7ce7d06b185..00000000000 --- a/internal/pulsartest/submit.go +++ /dev/null @@ -1,52 +0,0 @@ -package pulsartest - -import ( - "context" - "os" - - "github.com/apache/pulsar-client-go/pulsar" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" - - "github.com/armadaproject/armada/internal/common/requestid" - "github.com/armadaproject/armada/pkg/armadaevents" -) - -// Submit a job, represented by a file, to the Pulsar server. 
-func (a *App) Submit(path string) error { - eventYaml, err := os.ReadFile(path) - if err != nil { - return err - } - - es := &armadaevents.EventSequence{} - - if err = UnmarshalEventSubmission(eventYaml, es); err != nil { - return err - } - - log.Infof("submitting event sequence: %+v\n", es) - - // synchronously send request with event sequence - payload, err := proto.Marshal(es) - if err != nil { - return errors.WithStack(err) - } - - ctx := context.Background() - requestId := requestid.FromContextOrMissing(ctx) - - _, err = a.Producer.Send( - ctx, - &pulsar.ProducerMessage{ - Payload: payload, - Properties: map[string]string{ - requestid.MetadataKey: requestId, - }, - Key: es.JobSetName, - }, - ) - - return err -} diff --git a/internal/pulsartest/util.go b/internal/pulsartest/util.go deleted file mode 100644 index a8f6f3191e6..00000000000 --- a/internal/pulsartest/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package pulsartest - -import ( - "bytes" - - apimachineryYaml "k8s.io/apimachinery/pkg/util/yaml" - - "github.com/armadaproject/armada/pkg/armadaevents" -) - -// UnmarshalEventSubmission unmarshalls bytes into an EventSequence -func UnmarshalEventSubmission(yamlBytes []byte, es *armadaevents.EventSequence) error { - return apimachineryYaml.NewYAMLOrJSONDecoder(bytes.NewReader(yamlBytes), 128).Decode(es) -} diff --git a/internal/pulsartest/watch.go b/internal/pulsartest/watch.go deleted file mode 100644 index c8db76e51a5..00000000000 --- a/internal/pulsartest/watch.go +++ /dev/null @@ -1,34 +0,0 @@ -package pulsartest - -import ( - "fmt" - "log" - "os" - - "github.com/sanity-io/litter" - - "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/eventutil" -) - -// Watch for Pulsar events -func (a *App) Watch() error { - defer a.Reader.Close() - - for a.Reader.HasNext() { - msg, err := a.Reader.Next(armadacontext.Background()) - if err != nil { - log.Fatal(err) - } - - ctx := armadacontext.Background() - - es, err := eventutil.UnmarshalEventSequence(ctx, msg.Payload()) - if err != nil { - fmt.Fprintf(os.Stderr, "Could not unmarshal proto for msg %s\n", msg.ID()) - } - - fmt.Printf("Id: %s\nMessage: %s\n", msg.ID().String(), litter.Sdump(es)) - } - return nil -} diff --git a/internal/scheduler/adapters/adapters.go b/internal/scheduler/adapters/adapters.go index e8bbd80856a..1115d88869d 100644 --- a/internal/scheduler/adapters/adapters.go +++ b/internal/scheduler/adapters/adapters.go @@ -1,38 +1,26 @@ package adapters import ( - "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/armadaevents" ) -// PodRequirementsFromPod function creates the schedulerobjects and creates a value for the -// annotation field by supplying it with a cloned value of pod.Annotations -func PodRequirementsFromPod(pod *v1.Pod, priorityByPriorityClassName map[string]types.PriorityClass) *schedulerobjects.PodRequirements { - rv := PodRequirementsFromPodSpec(&pod.Spec, priorityByPriorityClassName) - rv.Annotations = maps.Clone(pod.Annotations) - return rv -} - // PodRequirementsFromPodSpec function returns *schedulerobjects.PodRequirements for podSpec. // An error is logged if the podSpec uses an unknown priority class. 
// This function may mutate podSpec.
func PodRequirementsFromPodSpec(podSpec *v1.PodSpec, priorityByPriorityClassName map[string]types.PriorityClass) *schedulerobjects.PodRequirements {
- priority, ok := api.PriorityFromPodSpec(podSpec, priorityByPriorityClassName)
+ priority, ok := PriorityFromPodSpec(podSpec, priorityByPriorityClassName)
 if priorityByPriorityClassName != nil && !ok {
 // Ignore this error if priorityByPriorityClassName is explicitly set to nil.
 // We assume that in this case the caller is sure the priority does not need to be set.
 err := errors.Errorf("unknown priorityClassName %s", podSpec.PriorityClassName)
- logging.WithStacktrace(logrus.NewEntry(logrus.New()), err).Error("failed to get priority from priorityClassName")
+ logging.WithStacktrace(logrus.NewEntry(logrus.StandardLogger()), err).Error("failed to get priority from priorityClassName")
 }
 preemptionPolicy := string(v1.PreemptLowerPriority)
 if podSpec.PreemptionPolicy != nil {
@@ -48,45 +36,29 @@ func PodRequirementsFromPodSpec(podSpec *v1.PodSpec, priorityByPriorityClassName
 }
 }

-// SchedulingInfoFromSubmitJob returns a minimal representation of a job containing only the info needed by the scheduler.
-func SchedulingInfoFromSubmitJob(submitJob *armadaevents.SubmitJob, submitTime time.Time, priorityClasses map[string]types.PriorityClass) (*schedulerobjects.JobSchedulingInfo, error) {
- // Component common to all jobs.
- schedulingInfo := &schedulerobjects.JobSchedulingInfo{
- Lifetime: submitJob.Lifetime,
- AtMostOnce: submitJob.AtMostOnce,
- Preemptible: submitJob.Preemptible,
- ConcurrencySafe: submitJob.ConcurrencySafe,
- SubmitTime: submitTime,
- Priority: submitJob.Priority,
- Version: 0,
- QueueTtlSeconds: submitJob.QueueTtlSeconds,
+// PriorityFromPodSpec returns the priority of a pod spec.
+// If a priority is set directly on the pod spec, that value is returned.
+// Otherwise, the priority class named by podSpec.PriorityClassName is looked up in the priorityClasses map and its priority is returned.
+// In both cases the value is returned along with a true boolean.
+// If no priority can be determined for the pod spec, 0 is returned along with a false boolean.
+func PriorityFromPodSpec(podSpec *v1.PodSpec, priorityClasses map[string]types.PriorityClass) (int32, bool) {
+ // If there's no podSpec, there's nothing we can do.
+ if podSpec == nil {
+ return 0, false
 }
- // Scheduling requirements specific to the objects that make up this job.
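(Editorial aside, not part of this change: the snippet below is an illustrative sketch of the three resolution paths of PriorityFromPodSpec introduced above; the priority class name "armada-default" and the priority values are invented.)

priorityClasses := map[string]types.PriorityClass{"armada-default": {Priority: 100}}

p := int32(42)
prio, ok := PriorityFromPodSpec(&v1.PodSpec{Priority: &p}, priorityClasses)                       // 42, true: set directly on the pod spec
prio, ok = PriorityFromPodSpec(&v1.PodSpec{PriorityClassName: "armada-default"}, priorityClasses) // 100, true: taken from the priority class
prio, ok = PriorityFromPodSpec(&v1.PodSpec{PriorityClassName: "unknown"}, priorityClasses)        // 0, false: priority class not found
_, _ = prio, ok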
- switch object := submitJob.MainObject.Object.(type) { - case *armadaevents.KubernetesMainObject_PodSpec: - podSpec := object.PodSpec.PodSpec - schedulingInfo.PriorityClassName = podSpec.PriorityClassName - podRequirements := PodRequirementsFromPodSpec(podSpec, priorityClasses) - if submitJob.ObjectMeta != nil { - podRequirements.Annotations = maps.Clone(submitJob.ObjectMeta.Annotations) - } - if submitJob.MainObject.ObjectMeta != nil { - if podRequirements.Annotations == nil { - podRequirements.Annotations = make(map[string]string, len(submitJob.MainObject.ObjectMeta.Annotations)) - } - maps.Copy(podRequirements.Annotations, submitJob.MainObject.ObjectMeta.Annotations) - } - schedulingInfo.ObjectRequirements = append( - schedulingInfo.ObjectRequirements, - &schedulerobjects.ObjectRequirements{ - Requirements: &schedulerobjects.ObjectRequirements_PodRequirements{ - PodRequirements: podRequirements, - }, - }, - ) - default: - return nil, errors.Errorf("unsupported object type %T", object) + // If a priority is directly specified, use that + if podSpec.Priority != nil { + return *podSpec.Priority, true } - return schedulingInfo, nil + + // If we find a priority class use that + priorityClass, ok := priorityClasses[podSpec.PriorityClassName] + if ok { + return priorityClass.Priority, true + } + + // Couldn't find anything + return 0, false } diff --git a/internal/scheduler/adapters/adapters_test.go b/internal/scheduler/adapters/adapters_test.go index 90e6df0318b..6ab18152df8 100644 --- a/internal/scheduler/adapters/adapters_test.go +++ b/internal/scheduler/adapters/adapters_test.go @@ -5,23 +5,30 @@ import ( "os" "testing" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) +const ( + PriorityClass0 = "priority-0" + PriorityClass1 = "priority-1" + PriorityClass2 = "priority-2" + PriorityClass3 = "priority-3" +) + var ( priorityByPriorityClassName = map[string]types.PriorityClass{ - "priority-0": {Priority: 0, Preemptible: true}, - "priority-1": {Priority: 1, Preemptible: true}, - "priority-2": {Priority: 2, Preemptible: true}, - "priority-3": {Priority: 3, Preemptible: false}, + PriorityClass0: {Priority: 0, Preemptible: true}, + PriorityClass1: {Priority: 1, Preemptible: true}, + PriorityClass2: {Priority: 2, Preemptible: true}, + PriorityClass3: {Priority: 3, Preemptible: false}, } priority int32 = 1 @@ -99,20 +106,15 @@ func TestPodRequirementsFromPodSpecPriorityByPriorityClassName(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Creating backup for stderr - old := os.Stderr r, w, _ := os.Pipe() - // Assigning stderr to file, w - os.Stderr = w // Stderr from this function would be written to file w + logrus.SetOutput(w) scheduler := PodRequirementsFromPodSpec(&test.podspec, test.priorityByPriorityClassName) // Closing file, w err := w.Close() require.NoError(t, err) // Reading from file out, _ := io.ReadAll(r) - // Restoring stderr - os.Stderr = old expectedScheduler.Priority = test.priority assert.Equal(t, scheduler, expectedScheduler) // if loggedError is true, bytes should be written to stderr, @@ -161,37 +163,45 @@ func TestPodRequirementsFromPodSpecPreemptionPolicy(t 
*testing.T) { } } -func TestPodRequirementsFromPod(t *testing.T) { - podSpec := &v1.PodSpec{ - Priority: &priority, - Containers: []v1.Container{ - { - Resources: v1.ResourceRequirements{ - Limits: v1.ResourceList{ - v1.ResourceName("cpu"): *resource.NewMilliQuantity(5300, resource.DecimalSI), - v1.ResourceName("memory"): *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), - }, - Requests: v1.ResourceList{ - v1.ResourceName("cpu"): *resource.NewMilliQuantity(300, resource.DecimalSI), - v1.ResourceName("memory"): *resource.NewQuantity(2*1024*1024*1024, resource.BinarySI), - }, - }, +func TestPriorityFromPodSpec(t *testing.T) { + tests := map[string]struct { + podSpec *v1.PodSpec + expectedPriority int32 + expectedOk bool + }{ + "nil podSpec": { + podSpec: nil, + expectedPriority: 0, + expectedOk: false, + }, + "priority already set": { + podSpec: &v1.PodSpec{ + Priority: pointer.Int32(1), + PriorityClassName: PriorityClass2, }, + expectedPriority: 1, + expectedOk: true, }, - } - pod := v1.Pod{ - Spec: *podSpec, - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - configuration.GangIdAnnotation: "gang-id", - configuration.GangCardinalityAnnotation: "1", + "existing priorityClass": { + podSpec: &v1.PodSpec{ + PriorityClassName: PriorityClass2, + }, + expectedPriority: 2, + expectedOk: true, + }, + "non-existing priorityClass": { + podSpec: &v1.PodSpec{ + PriorityClassName: "does not exist", }, + expectedPriority: 0, + expectedOk: false, }, } - rv := PodRequirementsFromPod(&pod, priorityByPriorityClassName) - rv.Annotations["something"] = "test" - // Ensures that any modification made to the returned value of PodRequirementsFromPod function, "rv", does not - // affect the original pod definition. This assertion checks if the length of "pod.Annotation" is altered - // in view of the modification made to "rv" above. - assert.Len(t, pod.Annotations, 2) + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + p, ok := PriorityFromPodSpec(tc.podSpec, priorityByPriorityClassName) + assert.Equal(t, tc.expectedPriority, p) + assert.Equal(t, tc.expectedOk, ok) + }) + } } diff --git a/internal/scheduler/api.go b/internal/scheduler/api.go index a501d242241..01259561597 100644 --- a/internal/scheduler/api.go +++ b/internal/scheduler/api.go @@ -12,15 +12,14 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" "github.com/armadaproject/armada/internal/common/logging" "github.com/armadaproject/armada/internal/common/pulsarutils" - "github.com/armadaproject/armada/internal/common/schedulers" + "github.com/armadaproject/armada/internal/common/slices" priorityTypes "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/pkg/armadaevents" @@ -37,8 +36,6 @@ type ExecutorApi struct { jobRepository database.JobRepository // Interface to the component storing executor information, such as which when we last heard from an executor. executorRepository database.ExecutorRepository - // Like executorRepository - legacyExecutorRepository database.ExecutorRepository // Allowed priority class priorities. 
allowedPriorities []int32 // Known priority classes @@ -55,7 +52,6 @@ type ExecutorApi struct { func NewExecutorApi(producer pulsar.Producer, jobRepository database.JobRepository, executorRepository database.ExecutorRepository, - legacyExecutorRepository database.ExecutorRepository, allowedPriorities []int32, nodeIdLabel string, priorityClassNameOverride *string, @@ -69,7 +65,6 @@ func NewExecutorApi(producer pulsar.Producer, producer: producer, jobRepository: jobRepository, executorRepository: executorRepository, - legacyExecutorRepository: legacyExecutorRepository, allowedPriorities: allowedPriorities, maxPulsarMessageSizeBytes: maxPulsarMessageSizeBytes, nodeIdLabel: nodeIdLabel, @@ -96,9 +91,6 @@ func (srv *ExecutorApi) LeaseJobRuns(stream executorapi.ExecutorApi_LeaseJobRuns if err := srv.executorRepository.StoreExecutor(ctx, executor); err != nil { return err } - if err = srv.legacyExecutorRepository.StoreExecutor(ctx, executor); err != nil { - return err - } requestRuns, err := runIdsFromLeaseRequest(req) if err != nil { @@ -122,7 +114,7 @@ func (srv *ExecutorApi) LeaseJobRuns(stream executorapi.ExecutorApi_LeaseJobRuns if err := stream.Send(&executorapi.LeaseStreamMessage{ Event: &executorapi.LeaseStreamMessage_CancelRuns{ CancelRuns: &executorapi.CancelRuns{ - JobRunIdsToCancel: util.Map(runsToCancel, func(x uuid.UUID) *armadaevents.Uuid { + JobRunIdsToCancel: slices.Map(runsToCancel, func(x uuid.UUID) *armadaevents.Uuid { return armadaevents.ProtoUuidFromUuid(x) }), }, @@ -318,7 +310,7 @@ func addAnnotations(job *armadaevents.SubmitJob, annotations map[string]string) // ReportEvents publishes all eventSequences to Pulsar. The eventSequences are compacted for more efficient publishing. func (srv *ExecutorApi) ReportEvents(grpcCtx context.Context, list *executorapi.EventList) (*types.Empty, error) { ctx := armadacontext.FromGrpcCtx(grpcCtx) - err := pulsarutils.CompactAndPublishSequences(ctx, list.Events, srv.producer, srv.maxPulsarMessageSizeBytes, schedulers.Pulsar) + err := pulsarutils.CompactAndPublishSequences(ctx, list.Events, srv.producer, srv.maxPulsarMessageSizeBytes) return &types.Empty{}, err } @@ -341,7 +333,7 @@ func (srv *ExecutorApi) executorFromLeaseRequest(ctx *armadacontext.Context, req Nodes: nodes, MinimumJobSize: schedulerobjects.ResourceList{Resources: req.MinimumJobSize}, LastUpdateTime: now, - UnassignedJobRuns: util.Map(req.UnassignedJobRunIds, func(jobId armadaevents.Uuid) string { + UnassignedJobRuns: slices.Map(req.UnassignedJobRunIds, func(jobId armadaevents.Uuid) string { return strings.ToLower(armadaevents.UuidFromProtoUuid(&jobId).String()) }), } diff --git a/internal/scheduler/api_test.go b/internal/scheduler/api_test.go index 45197fd6386..343c1896591 100644 --- a/internal/scheduler/api_test.go +++ b/internal/scheduler/api_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/compress" @@ -294,7 +294,6 @@ func TestExecutorApi_LeaseJobRuns(t *testing.T) { mockPulsarProducer := mocks.NewMockProducer(ctrl) mockJobRepository := schedulermocks.NewMockJobRepository(ctrl) mockExecutorRepository := schedulermocks.NewMockExecutorRepository(ctrl) - mockLegacyExecutorRepository := schedulermocks.NewMockExecutorRepository(ctrl) mockStream := 
schedulermocks.NewMockExecutorApi_LeaseJobRunsServer(ctrl) runIds, err := runIdsFromLeaseRequest(tc.request) @@ -307,10 +306,6 @@ func TestExecutorApi_LeaseJobRuns(t *testing.T) { assert.Equal(t, tc.expectedExecutor, executor) return nil }).Times(1) - mockLegacyExecutorRepository.EXPECT().StoreExecutor(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx *armadacontext.Context, executor *schedulerobjects.Executor) error { - assert.Equal(t, tc.expectedExecutor, executor) - return nil - }).Times(1) mockJobRepository.EXPECT().FindInactiveRuns(gomock.Any(), schedulermocks.SliceMatcher[uuid.UUID]{Expected: runIds}).Return(tc.runsToCancel, nil).Times(1) mockJobRepository.EXPECT().FetchJobRunLeases(gomock.Any(), tc.request.ExecutorId, maxJobsPerCall, runIds).Return(tc.leases, nil).Times(1) @@ -325,7 +320,6 @@ func TestExecutorApi_LeaseJobRuns(t *testing.T) { mockPulsarProducer, mockJobRepository, mockExecutorRepository, - mockLegacyExecutorRepository, []int32{1000, 2000}, "kubernetes.io/hostname", nil, @@ -434,7 +428,6 @@ func TestExecutorApi_Publish(t *testing.T) { mockPulsarProducer := mocks.NewMockProducer(ctrl) mockJobRepository := schedulermocks.NewMockJobRepository(ctrl) mockExecutorRepository := schedulermocks.NewMockExecutorRepository(ctrl) - mockLegacyExecutorRepository := schedulermocks.NewMockExecutorRepository(ctrl) // capture all sent messages var capturedEvents []*armadaevents.EventSequence @@ -453,7 +446,6 @@ func TestExecutorApi_Publish(t *testing.T) { mockPulsarProducer, mockJobRepository, mockExecutorRepository, - mockLegacyExecutorRepository, []int32{1000, 2000}, "kubernetes.io/hostname", nil, diff --git a/internal/scheduler/common.go b/internal/scheduler/common.go index 2f513212e8c..066ab502505 100644 --- a/internal/scheduler/common.go +++ b/internal/scheduler/common.go @@ -5,50 +5,57 @@ import ( "golang.org/x/exp/maps" + "github.com/armadaproject/armada/internal/common/armadacontext" armadamaps "github.com/armadaproject/armada/internal/common/maps" armadaslices "github.com/armadaproject/armada/internal/common/slices" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -// JobsSummary returns a string giving an overview of the provided jobs meant for logging. -// For example: "affected queues [A, B]; resources {A: {cpu: 1}, B: {cpu: 2}}; jobs [jobAId, jobBId]". 
-func JobsSummary(jctxs []*schedulercontext.JobSchedulingContext) string { +// PrintJobSummary logs a summary of the job scheduling context +// It will log a high level summary at Info level, and a list of all queues + jobs affected at debug level +func PrintJobSummary(ctx *armadacontext.Context, prefix string, jctxs []*schedulercontext.JobSchedulingContext) { if len(jctxs) == 0 { - return "" + return } jobsByQueue := armadaslices.MapAndGroupByFuncs( jctxs, func(jctx *schedulercontext.JobSchedulingContext) string { - return jctx.Job.GetQueue() + return jctx.Job.Queue() }, - func(jctx *schedulercontext.JobSchedulingContext) interfaces.LegacySchedulerJob { + func(jctx *schedulercontext.JobSchedulingContext) *jobdb.Job { return jctx.Job }, ) resourcesByQueue := armadamaps.MapValues( jobsByQueue, - func(jobs []interfaces.LegacySchedulerJob) schedulerobjects.ResourceList { + func(jobs []*jobdb.Job) schedulerobjects.ResourceList { rv := schedulerobjects.NewResourceListWithDefaultSize() for _, job := range jobs { - rv.AddV1ResourceList(job.GetResourceRequirements().Requests) + rv.AddV1ResourceList(job.ResourceRequirements().Requests) } return rv }, ) + jobCountPerQueue := armadamaps.MapValues( + jobsByQueue, + func(jobs []*jobdb.Job) int { + return len(jobs) + }, + ) jobIdsByQueue := armadamaps.MapValues( jobsByQueue, - func(jobs []interfaces.LegacySchedulerJob) []string { + func(jobs []*jobdb.Job) []string { rv := make([]string, len(jobs)) for i, job := range jobs { - rv[i] = job.GetId() + rv[i] = job.Id() } return rv }, ) - return fmt.Sprintf( - "affected queues %v; resources %v; jobs %v", + summary := fmt.Sprintf( + "affected queues %v; resources %v; jobs per queue %v", maps.Keys(jobsByQueue), armadamaps.MapValues( resourcesByQueue, @@ -56,6 +63,10 @@ func JobsSummary(jctxs []*schedulercontext.JobSchedulingContext) string { return rl.CompactString() }, ), - jobIdsByQueue, + jobCountPerQueue, ) + verbose := fmt.Sprintf("affected jobs %v", jobIdsByQueue) + + ctx.Infof("%s %s", prefix, summary) + ctx.Debugf("%s %s", prefix, verbose) } diff --git a/internal/scheduler/common_test.go b/internal/scheduler/common_test.go index 89c15bad01b..abb6594ec32 100644 --- a/internal/scheduler/common_test.go +++ b/internal/scheduler/common_test.go @@ -5,74 +5,11 @@ import ( "testing" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - "github.com/armadaproject/armada/pkg/api" ) -func TestGetPodRequirements(t *testing.T) { - resourceLimit := v1.ResourceList{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("128Mi"), - "ephemeral-storage": resource.MustParse("8Gi"), - } - - // Hack to remove cached strings in quantities, which invalidate comparisons. 
- q := resourceLimit["cpu"] - q.Add(resource.Quantity{}) - resourceLimit["cpu"] = q - q = resourceLimit["memory"] - q.Add(resource.Quantity{}) - resourceLimit["memory"] = q - q = resourceLimit["ephemeral-storage"] - q.Add(resource.Quantity{}) - resourceLimit["ephemeral-storage"] = q - - requirements := v1.ResourceRequirements{ - Limits: resourceLimit, - Requests: resourceLimit, - } - - j := &api.Job{ - Id: util.NewULID(), - Queue: "test", - JobSetId: "set1", - Priority: 1, - Annotations: map[string]string{ - "something": "test", - configuration.GangIdAnnotation: "gang-id", - configuration.GangCardinalityAnnotation: "1", - }, - PodSpecs: []*v1.PodSpec{ - { - Containers: []v1.Container{ - { - Resources: requirements, - }, - }, - PriorityClassName: "armada-default", - }, - }, - } - expected := &schedulerobjects.PodRequirements{ - Priority: 1, - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: requirements, - Annotations: map[string]string{ - "something": "test", - configuration.GangIdAnnotation: "gang-id", - configuration.GangCardinalityAnnotation: "1", - }, - } - actual := j.GetPodRequirements(map[string]types.PriorityClass{"armada-default": {Priority: int32(1)}}) - assert.Equal(t, expected, actual) -} - func TestResourceListAsWeightedMillis(t *testing.T) { tests := map[string]struct { rl schedulerobjects.ResourceList diff --git a/internal/scheduler/configuration/configuration.go b/internal/scheduler/configuration/configuration.go index ffd1f610fae..9e3cf36c04e 100644 --- a/internal/scheduler/configuration/configuration.go +++ b/internal/scheduler/configuration/configuration.go @@ -1,15 +1,17 @@ package configuration import ( + "fmt" "time" "github.com/go-playground/validator/v10" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" authconfig "github.com/armadaproject/armada/internal/common/auth/configuration" - "github.com/armadaproject/armada/internal/common/config" grpcconfig "github.com/armadaproject/armada/internal/common/grpc/configuration" + "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/pkg/client" ) @@ -22,8 +24,8 @@ const ( type Configuration struct { // Database configuration Postgres configuration.PostgresConfig - // Redis Comnfig - Redis config.RedisConfig + // Armada Api Connection. Used to fetch queues. + ArmadaApi client.ApiConnectionDetails // General Pulsar configuration Pulsar configuration.PulsarConfig // Configuration controlling leader election @@ -34,7 +36,7 @@ type Configuration struct { // Due to replace metrics configured via the above entry. 
SchedulerMetrics MetricsConfig // Scheduler configuration (this is shared with the old scheduler) - Scheduling configuration.SchedulingConfig + Scheduling SchedulingConfig Auth authconfig.AuthConfig Grpc grpcconfig.GrpcConfig Http HttpConfig @@ -56,11 +58,15 @@ type Configuration struct { DatabaseFetchSize int `validate:"required"` // Timeout to use when sending messages to pulsar PulsarSendTimeout time.Duration `validate:"required"` + // Frequency at which queues will be fetched from the API + QueueRefreshPeriod time.Duration `validate:"required"` + // If true then submit checks will be skipped + DisableSubmitCheck bool } func (c Configuration) Validate() error { validate := validator.New() - validate.RegisterStructValidation(configuration.SchedulingConfigValidation, configuration.SchedulingConfig{}) + validate.RegisterStructValidation(SchedulingConfigValidation, SchedulingConfig{}) return validate.Struct(c) } @@ -79,9 +85,6 @@ type MetricsConfig struct { // Allowed characters in resource names are [a-zA-Z_:][a-zA-Z0-9_:]* // It can also be used to track multiple resources within the same metric, e.g., "nvidia.com/gpu" and "amd.com/gpu". ResourceRenaming map[v1.ResourceName]string - // Controls the cycle time metrics. - // TODO(albin): Not used yet. - CycleTimeConfig PrometheusSummaryConfig // The first matching regex of each error message is cached in an LRU cache. // This setting controls the cache size. MatchedRegexIndexByErrorMessageCacheSize uint64 @@ -89,22 +92,6 @@ type MetricsConfig struct { ResetInterval time.Duration } -// PrometheusSummaryConfig contains the relevant config for a prometheus.Summary. -type PrometheusSummaryConfig struct { - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is an empty map, resulting in a summary without - // quantiles. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Only applies to pre-calculated quantiles, does not - // apply to _sum and _count. Must be positive. The default value is - // DefMaxAge. - MaxAge time.Duration -} - type LeaderConfig struct { // Valid modes are "standalone" or "kubernetes" Mode string `validate:"required"` @@ -146,3 +133,195 @@ type HistogramConfig struct { Factor float64 Count int } + +// SchedulingConfig contains config controlling the Armada scheduler. +// +// The Armada scheduler is in charge of assigning pods to cluster and nodes. +// The Armada scheduler is part of the Armada control plane. +// +// Features: +// 1. Queuing and fairly dividing resources between users. +// 2. Fair preemption, including between jobs of equal priority to balance resource allocation. +// 3. Gang scheduling, optional across clusters, and with lower and upper bounds on the number of jobs scheduled. +// +// Note that Armada still relies on kube-scheduler for binding of pods to nodes. +// This is achieved by adding to each pod created by Armada a node selector that matches only the intended node. +type SchedulingConfig struct { + // Set to true to disable scheduling + DisableScheduling bool + // Set to true to enable scheduler assertions. This results in some performance loss. + EnableAssertions bool + // Only queues allocated more than this fraction of their fair share are considered for preemption. 
+ ProtectedFractionOfFairShare float64 `validate:"gte=0"`
+ // Armada adds a node selector term to every scheduled pod using this label with the node name as value.
+ // This is to force kube-scheduler to schedule pods on the node chosen by Armada.
+ // For example, if NodeIdLabel is "kubernetes.io/hostname" and Armada schedules a pod on node "myNode",
+ // then Armada adds "kubernetes.io/hostname": "myNode" to the pod node selector before sending it to the executor.
+ NodeIdLabel string `validate:"required"`
+ // Map from priority class names to priority classes.
+ // Must be consistent with Kubernetes priority classes.
+ // I.e., priority classes defined here must be defined in all executor clusters and should map to the same priority.
+ PriorityClasses map[string]types.PriorityClass `validate:"dive"`
+ // Jobs with no priority class are assigned this priority class when ingested by the scheduler.
+ // Must be a key in the PriorityClasses map above.
+ DefaultPriorityClassName string
+ // If set, override the priority class name of pods with this value when sending to an executor.
+ PriorityClassNameOverride *string
+ // Number of jobs to load from the database at a time.
+ MaxQueueLookback uint
+ // In each invocation of the scheduler, no more jobs are scheduled once this limit has been exceeded.
+ // Note that the total scheduled resources may be greater than this limit.
+ MaximumResourceFractionToSchedule map[string]float64
+ // Overrides MaximumResourceFractionToSchedule if set for the current pool.
+ MaximumResourceFractionToScheduleByPool map[string]map[string]float64
+ // The rate at which Armada schedules jobs is rate-limited using a token bucket approach.
+ // Specifically, there is a token bucket that persists between scheduling rounds.
+ // The bucket fills up at a rate of MaximumSchedulingRate tokens per second and has capacity MaximumSchedulingBurst.
+ // A token is removed from the bucket when a job is scheduled, and scheduling stops while the bucket is empty.
+ //
+ // Hence, MaximumSchedulingRate controls the maximum number of jobs scheduled per second in steady-state,
+ // i.e., once the burst capacity has been exhausted.
+ //
+ // Rate-limiting is based on the number of tokens available at the start of each scheduling round,
+ // i.e., tokens accumulated while scheduling become available at the start of the next scheduling round.
+ //
+ // For more information about the rate-limiter, see:
+ // https://pkg.go.dev/golang.org/x/time/rate#Limiter
+ MaximumSchedulingRate float64 `validate:"gt=0"`
+ // MaximumSchedulingBurst controls the burst capacity of the rate-limiter.
+ //
+ // There are two important implications:
+ // - Armada will never schedule more than MaximumSchedulingBurst jobs per scheduling round.
+ // - Gang jobs with cardinality greater than MaximumSchedulingBurst can never be scheduled.
+ MaximumSchedulingBurst int `validate:"gt=0"`
+ // In addition to the global rate-limiter, there is a separate rate-limiter for each queue.
+ // These work the same as the global rate-limiter, except they apply only to jobs scheduled from a specific queue.
+ //
+ // Per-queue version of MaximumSchedulingRate.
+ MaximumPerQueueSchedulingRate float64 `validate:"gt=0"`
+ // Per-queue version of MaximumSchedulingBurst.
+ MaximumPerQueueSchedulingBurst int `validate:"gt=0"`
+ // Maximum number of times a job is retried before it is considered failed.
+ MaxRetries uint
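(Editorial aside, not part of this change: the token-bucket description above maps onto golang.org/x/time/rate roughly as sketched below; the helper name is invented.)

import "golang.org/x/time/rate"

// newSchedulingLimiter sketches how the two config fields plausibly translate into a token bucket:
// the bucket refills at MaximumSchedulingRate tokens per second, holds at most MaximumSchedulingBurst
// tokens, and one token would be consumed per scheduled job (e.g. via limiter.Allow()),
// so scheduling stops for the round once the bucket is empty.
func newSchedulingLimiter(maximumSchedulingRate float64, maximumSchedulingBurst int) *rate.Limiter {
	return rate.NewLimiter(rate.Limit(maximumSchedulingRate), maximumSchedulingBurst)
}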
+ // List of resource names, e.g., []string{"cpu", "memory"}, to consider when computing DominantResourceFairness.
+ DominantResourceFairnessResourcesToConsider []string
+ // Resource types (e.g., memory or nvidia.com/gpu) that the scheduler keeps track of.
+ // Resource types not on this list will be ignored if seen on a node, and any jobs requesting them will fail.
+ SupportedResourceTypes []ResourceType
+ // Resources, e.g., "cpu", "memory", and "nvidia.com/gpu", for which the scheduler creates indexes for efficient lookup.
+ // This list must contain at least one resource. Adding more than one resource is not required, but may speed up scheduling.
+ // Ideally, this list contains all resources that frequently constrain which nodes a job can be scheduled onto.
+ //
+ // In particular, the allocatable resources on each node are rounded to a multiple of the resolution.
+ // Lower resolution speeds up scheduling by improving node lookup speed but may prevent scheduling jobs,
+ // since the allocatable resources may be rounded down to be a multiple of the resolution.
+ //
+ // See NodeDb docs for more details.
+ IndexedResources []ResourceType
+ // Node labels that the scheduler creates indexes for efficient lookup of.
+ // Should include node labels frequently used by node selectors on submitted jobs.
+ //
+ // If not set, no labels are indexed.
+ IndexedNodeLabels []string
+ // Taint keys that the scheduler creates indexes for efficient lookup of.
+ // Should include keys of taints frequently used in tolerations on submitted jobs.
+ //
+ // If not set, all taints are indexed.
+ IndexedTaints []string
+ // WellKnownNodeTypes defines a set of well-known node types used to define "home" and "away" nodes for a given priority class.
+ WellKnownNodeTypes []WellKnownNodeType `validate:"dive"`
+ // Executors that haven't heartbeated within this time period are considered stale.
+ // No new jobs are scheduled onto stale executors.
+ ExecutorTimeout time.Duration
+ // Maximum number of jobs that can be assigned to an executor but not yet acknowledged, before
+ // the executor is excluded from consideration by the scheduler.
+ MaxUnacknowledgedJobsPerExecutor uint
+ // If true, do not skip jobs during scheduling even if their requirements are known to be impossible to meet.
+ AlwaysAttemptScheduling bool
+ // The frequency at which the scheduler updates the cluster state.
+ ExecutorUpdateFrequency time.Duration
+ // Controls node and queue success probability estimation.
+ FailureProbabilityEstimation FailureEstimatorConfig
+ // Controls node quarantining, i.e., removing misbehaving nodes from consideration for scheduling.
+ NodeQuarantining NodeQuarantinerConfig
+ // Controls queue quarantining, i.e., rate-limiting scheduling from misbehaving queues.
+ QueueQuarantining QueueQuarantinerConfig
+ // Defines the order in which pools will be scheduled.
Higher priority pools will be scheduled first + PoolSchedulePriority map[string]int + // Default priority for pools that are not in the above list + DefaultPoolSchedulePriority int +} + +const ( + DuplicateWellKnownNodeTypeErrorMessage = "duplicate well-known node type name" + AwayNodeTypesWithoutPreemptionErrorMessage = "priority class has away node types but is not preemptible" + UnknownWellKnownNodeTypeErrorMessage = "priority class refers to unknown well-known node type" +) + +func SchedulingConfigValidation(sl validator.StructLevel) { + c := sl.Current().Interface().(SchedulingConfig) + + wellKnownNodeTypes := make(map[string]bool) + for i, wellKnownNodeType := range c.WellKnownNodeTypes { + if wellKnownNodeTypes[wellKnownNodeType.Name] { + fieldName := fmt.Sprintf("WellKnownNodeTypes[%d].Name", i) + sl.ReportError(wellKnownNodeType.Name, fieldName, "", DuplicateWellKnownNodeTypeErrorMessage, "") + } + wellKnownNodeTypes[wellKnownNodeType.Name] = true + } + + for priorityClassName, priorityClass := range c.PriorityClasses { + if len(priorityClass.AwayNodeTypes) > 0 && !priorityClass.Preemptible { + fieldName := fmt.Sprintf("Preemption.PriorityClasses[%s].Preemptible", priorityClassName) + sl.ReportError(priorityClass.Preemptible, fieldName, "", AwayNodeTypesWithoutPreemptionErrorMessage, "") + } + + for i, awayNodeType := range priorityClass.AwayNodeTypes { + if !wellKnownNodeTypes[awayNodeType.WellKnownNodeTypeName] { + fieldName := fmt.Sprintf("Preemption.PriorityClasses[%s].AwayNodeTypes[%d].WellKnownNodeTypeName", priorityClassName, i) + sl.ReportError(awayNodeType.WellKnownNodeTypeName, fieldName, "", UnknownWellKnownNodeTypeErrorMessage, "") + } + } + } +} + +// ResourceType represents a resource the scheduler indexes for efficient lookup. +type ResourceType struct { + // Resource name, e.g., "cpu", "memory", or "nvidia.com/gpu". + Name string + // Resolution with which Armada tracks this resource; larger values indicate lower resolution. + Resolution resource.Quantity +} + +// A WellKnownNodeType defines a set of nodes; see AwayNodeType. +type WellKnownNodeType struct { + // Name is the unique identifier for this node type. + Name string `validate:"required"` + // Taints is the set of taints that characterizes this node type; a node is + // part of this node type if and only if it has all of these taints. + Taints []v1.Taint +} + +// FailureEstimatorConfig controls node and queue success probability estimation. +// See internal/scheduler/failureestimator.go for details. +type FailureEstimatorConfig struct { + Disabled bool + NumInnerIterations int `validate:"gt=0"` + InnerOptimiserStepSize float64 `validate:"gt=0"` + OuterOptimiserStepSize float64 `validate:"gt=0"` + OuterOptimiserNesterovAcceleration float64 `validate:"gte=0"` +} + +// NodeQuarantinerConfig controls how nodes are quarantined, i.e., removed from consideration when scheduling new jobs. +// See internal/scheduler/quarantine/node_quarantiner.go for details. +type NodeQuarantinerConfig struct { + FailureProbabilityQuarantineThreshold float64 `validate:"gte=0,lte=1"` + FailureProbabilityEstimateTimeout time.Duration `validate:"gte=0"` +} + +// QueueQuarantinerConfig controls how scheduling from misbehaving queues is rate-limited. +// See internal/scheduler/quarantine/queue_quarantiner.go for details. 
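(Editorial aside, not part of this change: SchedulingConfigValidation above is a struct-level hook for go-playground/validator and is registered the same way Configuration.Validate does earlier in this file; schedulingConfig below stands in for any SchedulingConfig value.)

validate := validator.New()
validate.RegisterStructValidation(SchedulingConfigValidation, SchedulingConfig{})

// Struct() reports duplicate WellKnownNodeTypes names, away node types on non-preemptible
// priority classes, and references to undeclared well-known node types as field errors
// carrying the error messages defined above.
err := validate.Struct(schedulingConfig)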
+type QueueQuarantinerConfig struct { + QuarantineFactorMultiplier float64 `validate:"gte=0,lte=1"` + FailureProbabilityEstimateTimeout time.Duration `validate:"gte=0"` +} diff --git a/internal/scheduler/configuration/configuration_test.go b/internal/scheduler/configuration/configuration_test.go index 00b17af46d7..149a2b54b3c 100644 --- a/internal/scheduler/configuration/configuration_test.go +++ b/internal/scheduler/configuration/configuration_test.go @@ -6,14 +6,13 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/types" ) func TestSchedulingConfigValidate(t *testing.T) { c := Configuration{ - Scheduling: configuration.SchedulingConfig{ - WellKnownNodeTypes: []configuration.WellKnownNodeType{ + Scheduling: SchedulingConfig{ + WellKnownNodeTypes: []WellKnownNodeType{ { Name: "gpu", Taints: []v1.Taint{{Key: "gpu", Value: "true", Effect: v1.TaintEffectNoSchedule}}, @@ -38,9 +37,9 @@ func TestSchedulingConfigValidate(t *testing.T) { }, } expected := []string{ - configuration.DuplicateWellKnownNodeTypeErrorMessage, - configuration.AwayNodeTypesWithoutPreemptionErrorMessage, - configuration.UnknownWellKnownNodeTypeErrorMessage, + DuplicateWellKnownNodeTypeErrorMessage, + AwayNodeTypesWithoutPreemptionErrorMessage, + UnknownWellKnownNodeTypeErrorMessage, } err := c.Validate() diff --git a/internal/scheduler/constraints/constraints.go b/internal/scheduler/constraints/constraints.go index 816b7425295..05162ebd29b 100644 --- a/internal/scheduler/constraints/constraints.go +++ b/internal/scheduler/constraints/constraints.go @@ -7,10 +7,11 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - "github.com/armadaproject/armada/pkg/client/queue" + "github.com/armadaproject/armada/pkg/api" ) const ( @@ -33,8 +34,7 @@ const ( GangExceedsGlobalBurstSizeUnschedulableReason = "gang cardinality too large: exceeds global max burst size" GangExceedsQueueBurstSizeUnschedulableReason = "gang cardinality too large: exceeds queue max burst size" - UnschedulableReasonMaximumResourcesPerQueueExceeded = "per-queue resource limit exceeded" - UnschedulableReasonMaximumResourcesExceeded = "resource limit exceeded" + UnschedulableReasonMaximumResourcesExceeded = "resource limit exceeded" ) // IsTerminalUnschedulableReason returns true if reason indicates @@ -58,31 +58,31 @@ func IsTerminalQueueUnschedulableReason(reason string) bool { // SchedulingConstraints contains scheduling constraints, e.g., per-queue resource limits. type SchedulingConstraints struct { // Max number of jobs to consider for a queue before giving up. - MaxQueueLookback uint + maxQueueLookBack uint // Jobs leased to this executor must be at least this large. // Used, e.g., to avoid scheduling CPU-only jobs onto clusters with GPUs. - MinimumJobSize schedulerobjects.ResourceList + minimumJobSize map[string]resource.Quantity // Scheduling constraints by priority class. 
- PriorityClassSchedulingConstraintsByPriorityClassName map[string]PriorityClassSchedulingConstraints + priorityClassSchedulingConstraintsByPriorityClassName map[string]priorityClassSchedulingConstraints // Scheduling constraints for specific queues. - // If present for a particular queue, global limits (i.e., PriorityClassSchedulingConstraintsByPriorityClassName) + // If present for a particular queue, global limits (i.e., priorityClassSchedulingConstraintsByPriorityClassName) // do not apply for that queue. - QueueSchedulingConstraintsByQueueName map[string]QueueSchedulingConstraints + queueSchedulingConstraintsByQueueName map[string]queueSchedulingConstraints // Limits total resources scheduled per invocation. - MaximumResourcesToSchedule schedulerobjects.ResourceList + maximumResourcesToSchedule map[string]resource.Quantity } -// QueueSchedulingConstraints contains per-queue scheduling constraints. -type QueueSchedulingConstraints struct { +// queueSchedulingConstraints contains per-queue scheduling constraints. +type queueSchedulingConstraints struct { // Scheduling constraints by priority class. - PriorityClassSchedulingConstraintsByPriorityClassName map[string]PriorityClassSchedulingConstraints + PriorityClassSchedulingConstraintsByPriorityClassName map[string]priorityClassSchedulingConstraints } -// PriorityClassSchedulingConstraints contains scheduling constraints that apply to jobs of a specific priority class. -type PriorityClassSchedulingConstraints struct { +// priorityClassSchedulingConstraints contains scheduling constraints that apply to jobs of a specific priority class. +type priorityClassSchedulingConstraints struct { PriorityClassName string // Limits total resources allocated to jobs of this priority class per queue. - MaximumResourcesPerQueue schedulerobjects.ResourceList + MaximumResourcesPerQueue map[string]resource.Quantity } func NewSchedulingConstraints( @@ -90,37 +90,37 @@ func NewSchedulingConstraints( totalResources schedulerobjects.ResourceList, minimumJobSize schedulerobjects.ResourceList, config configuration.SchedulingConfig, - queues []queue.Queue, + queues []*api.Queue, ) SchedulingConstraints { - priorityClassSchedulingConstraintsByPriorityClassName := make(map[string]PriorityClassSchedulingConstraints, len(config.PriorityClasses)) + priorityClassSchedulingConstraintsByPriorityClassName := make(map[string]priorityClassSchedulingConstraints, len(config.PriorityClasses)) for name, priorityClass := range config.PriorityClasses { maximumResourceFractionPerQueue := priorityClass.MaximumResourceFractionPerQueue if m, ok := priorityClass.MaximumResourceFractionPerQueueByPool[pool]; ok { // Use pool-specific config is available. 
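(Editorial aside, not part of this change: with util.MergeMaps in the replacement line below, the pool-specific fractions override the base fractions per resource instead of replacing the whole map. Assuming MergeMaps returns a new map in which the second argument wins on conflicting keys, the effect is roughly:)

base := map[string]float64{"cpu": 0.01, "memory": 0.02}
poolOverride := map[string]float64{"cpu": 0.1}
merged := util.MergeMaps(base, poolOverride) // {"cpu": 0.1, "memory": 0.02}: memory keeps its base limit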
- maximumResourceFractionPerQueue = m + maximumResourceFractionPerQueue = util.MergeMaps(maximumResourceFractionPerQueue, m) } - priorityClassSchedulingConstraintsByPriorityClassName[name] = PriorityClassSchedulingConstraints{ + priorityClassSchedulingConstraintsByPriorityClassName[name] = priorityClassSchedulingConstraints{ PriorityClassName: name, - MaximumResourcesPerQueue: absoluteFromRelativeLimits(totalResources, maximumResourceFractionPerQueue), + MaximumResourcesPerQueue: absoluteFromRelativeLimits(totalResources.Resources, maximumResourceFractionPerQueue), } } - queueSchedulingConstraintsByQueueName := make(map[string]QueueSchedulingConstraints, len(queues)) + queueSchedulingConstraintsByQueueName := make(map[string]queueSchedulingConstraints, len(queues)) for _, queue := range queues { - priorityClassSchedulingConstraintsByPriorityClassNameForQueue := make(map[string]PriorityClassSchedulingConstraints, len(queue.ResourceLimitsByPriorityClassName)) + priorityClassSchedulingConstraintsByPriorityClassNameForQueue := make(map[string]priorityClassSchedulingConstraints, len(queue.ResourceLimitsByPriorityClassName)) for priorityClassName, priorityClassResourceLimits := range queue.ResourceLimitsByPriorityClassName { maximumResourceFraction := priorityClassResourceLimits.MaximumResourceFraction if m, ok := priorityClassResourceLimits.MaximumResourceFractionByPool[pool]; ok { // Use pool-specific maximum resource fraction if available. - maximumResourceFraction = m.MaximumResourceFraction + maximumResourceFraction = util.MergeMaps(maximumResourceFraction, m.MaximumResourceFraction) } - priorityClassSchedulingConstraintsByPriorityClassNameForQueue[priorityClassName] = PriorityClassSchedulingConstraints{ + priorityClassSchedulingConstraintsByPriorityClassNameForQueue[priorityClassName] = priorityClassSchedulingConstraints{ PriorityClassName: priorityClassName, - MaximumResourcesPerQueue: absoluteFromRelativeLimits(totalResources, maximumResourceFraction), + MaximumResourcesPerQueue: absoluteFromRelativeLimits(totalResources.Resources, maximumResourceFraction), } } if len(priorityClassSchedulingConstraintsByPriorityClassNameForQueue) > 0 { - queueSchedulingConstraintsByQueueName[queue.Name] = QueueSchedulingConstraints{ + queueSchedulingConstraintsByQueueName[queue.Name] = queueSchedulingConstraints{ PriorityClassSchedulingConstraintsByPriorityClassName: priorityClassSchedulingConstraintsByPriorityClassNameForQueue, } } @@ -132,18 +132,18 @@ func NewSchedulingConstraints( maximumResourceFractionToSchedule = m } return SchedulingConstraints{ - MaxQueueLookback: config.MaxQueueLookback, - MinimumJobSize: minimumJobSize, - MaximumResourcesToSchedule: absoluteFromRelativeLimits(totalResources, maximumResourceFractionToSchedule), - PriorityClassSchedulingConstraintsByPriorityClassName: priorityClassSchedulingConstraintsByPriorityClassName, - QueueSchedulingConstraintsByQueueName: queueSchedulingConstraintsByQueueName, + maxQueueLookBack: config.MaxQueueLookback, + minimumJobSize: minimumJobSize.Resources, + maximumResourcesToSchedule: absoluteFromRelativeLimits(totalResources.Resources, maximumResourceFractionToSchedule), + priorityClassSchedulingConstraintsByPriorityClassName: priorityClassSchedulingConstraintsByPriorityClassName, + queueSchedulingConstraintsByQueueName: queueSchedulingConstraintsByQueueName, } } -func absoluteFromRelativeLimits(totalResources schedulerobjects.ResourceList, relativeLimits map[string]float64) schedulerobjects.ResourceList { - absoluteLimits := 
schedulerobjects.NewResourceList(len(relativeLimits)) +func absoluteFromRelativeLimits(totalResources map[string]resource.Quantity, relativeLimits map[string]float64) map[string]resource.Quantity { + absoluteLimits := make(map[string]resource.Quantity, len(relativeLimits)) for t, f := range relativeLimits { - absoluteLimits.Set(t, ScaleQuantity(totalResources.Get(t).DeepCopy(), f)) + absoluteLimits[t] = ScaleQuantity(totalResources[t].DeepCopy(), f) } return absoluteLimits } @@ -157,8 +157,8 @@ func ScaleQuantity(q resource.Quantity, f float64) resource.Quantity { } func (constraints *SchedulingConstraints) CheckRoundConstraints(sctx *schedulercontext.SchedulingContext, queue string) (bool, string, error) { - // MaximumResourcesToSchedule check. - if !sctx.ScheduledResources.IsStrictlyLessOrEqual(constraints.MaximumResourcesToSchedule) { + // maximumResourcesToSchedule check. + if !isStrictlyLessOrEqual(sctx.ScheduledResources.Resources, constraints.maximumResourcesToSchedule) { return false, MaximumResourcesScheduledUnschedulableReason, nil } return true, "", nil @@ -174,7 +174,7 @@ func (constraints *SchedulingConstraints) CheckConstraints( } // Check that the job is large enough for this executor. - if ok, unschedulableReason := RequestsAreLargeEnough(gctx.TotalResourceRequests, constraints.MinimumJobSize); !ok { + if ok, unschedulableReason := RequestsAreLargeEnough(gctx.TotalResourceRequests.Resources, constraints.minimumJobSize); !ok { return false, unschedulableReason, nil } @@ -202,30 +202,56 @@ func (constraints *SchedulingConstraints) CheckConstraints( return false, QueueRateLimitExceededByGangUnschedulableReason, nil } - // QueueSchedulingConstraintsByQueueName / PriorityClassSchedulingConstraintsByPriorityClassName checks. - if queueConstraint, ok := constraints.QueueSchedulingConstraintsByQueueName[gctx.Queue]; ok { + // queueSchedulingConstraintsByQueueName / priorityClassSchedulingConstraintsByPriorityClassName checks. 
+ queueAndPriorityClassResourceLimits := constraints.getQueueAndPriorityClassResourceLimits(gctx)
+ priorityClassResourceLimits := constraints.getPriorityClassResourceLimits(gctx)
+ overallResourceLimits := util.MergeMaps(priorityClassResourceLimits, queueAndPriorityClassResourceLimits)
+ if !isStrictlyLessOrEqual(qctx.AllocatedByPriorityClass[gctx.PriorityClassName].Resources, overallResourceLimits) {
+ return false, UnschedulableReasonMaximumResourcesExceeded, nil
+ }
+
+ return true, "", nil
+}
+
+func (constraints *SchedulingConstraints) getQueueAndPriorityClassResourceLimits(gctx *schedulercontext.GangSchedulingContext) map[string]resource.Quantity {
+ if queueConstraint, ok := constraints.queueSchedulingConstraintsByQueueName[gctx.Queue]; ok {
 if priorityClassConstraint, ok := queueConstraint.PriorityClassSchedulingConstraintsByPriorityClassName[gctx.PriorityClassName]; ok {
- if !qctx.AllocatedByPriorityClass[gctx.PriorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) {
- return false, UnschedulableReasonMaximumResourcesPerQueueExceeded, nil
- }
- }
- } else {
- if priorityClassConstraint, ok := constraints.PriorityClassSchedulingConstraintsByPriorityClassName[gctx.PriorityClassName]; ok {
- if !qctx.AllocatedByPriorityClass[gctx.PriorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) {
- return false, UnschedulableReasonMaximumResourcesExceeded, nil
- }
+ return priorityClassConstraint.MaximumResourcesPerQueue
 }
 }
+ return map[string]resource.Quantity{}
+}

- return true, "", nil
+func (constraints *SchedulingConstraints) getPriorityClassResourceLimits(gctx *schedulercontext.GangSchedulingContext) map[string]resource.Quantity {
+ if priorityClassConstraint, ok := constraints.priorityClassSchedulingConstraintsByPriorityClassName[gctx.PriorityClassName]; ok {
+ return priorityClassConstraint.MaximumResourcesPerQueue
+ }
+ return map[string]resource.Quantity{}
 }

-func RequestsAreLargeEnough(totalResourceRequests, minRequest schedulerobjects.ResourceList) (bool, string) {
- for t, minQuantity := range minRequest.Resources {
- q := totalResourceRequests.Get(t)
+func RequestsAreLargeEnough(totalResourceRequests, minRequest map[string]resource.Quantity) (bool, string) {
+ for t, minQuantity := range minRequest {
+ q := totalResourceRequests[t]
 if minQuantity.Cmp(q) == 1 {
 return false, fmt.Sprintf("job requests %s %s, but the minimum is %s", q.String(), t, minQuantity.String())
 }
 }
 return true, ""
 }
+
+func (constraints *SchedulingConstraints) GetMaxQueueLookBack() uint {
+ return constraints.maxQueueLookBack
+}
+
+// isStrictlyLessOrEqual returns false if
+// - for some resource type in b, the corresponding quantity in a is greater than that in b
+//   (a quantity missing from a is treated as zero; resource types present only in a are ignored)
+// and true otherwise.
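// For intuition (editorial example, not part of this change; compare TestIsStrictlyLessOrEqual below):
//
//	isStrictlyLessOrEqual(
//		map[string]resource.Quantity{"cpu": resource.MustParse("3")},                                      // a: allocated
//		map[string]resource.Quantity{"cpu": resource.MustParse("2"), "memory": resource.MustParse("1Gi")}, // b: limits
//	) // false: allocated cpu (3) exceeds the cpu limit (2)
//
//	isStrictlyLessOrEqual(
//		map[string]resource.Quantity{"cpu": resource.MustParse("1"), "disk": resource.MustParse("10Gi")},
//		map[string]resource.Quantity{"cpu": resource.MustParse("2"), "memory": resource.MustParse("1Gi")},
//	) // true: memory missing from a counts as zero, and disk has no limit in b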
+func isStrictlyLessOrEqual(a map[string]resource.Quantity, b map[string]resource.Quantity) bool { + for t, q := range b { + if q.Cmp(a[t]) == -1 { + return false + } + } + return true +} diff --git a/internal/scheduler/constraints/constraints_test.go b/internal/scheduler/constraints/constraints_test.go index c2b17cf2aa3..d31d7f860db 100644 --- a/internal/scheduler/constraints/constraints_test.go +++ b/internal/scheduler/constraints/constraints_test.go @@ -4,20 +4,16 @@ import ( "testing" "time" - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/client/queue" - - "golang.org/x/time/rate" - - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/time/rate" "k8s.io/apimachinery/pkg/api/resource" + "github.com/armadaproject/armada/internal/common/types" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" + "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/pkg/api" ) type constraintTest struct { @@ -37,14 +33,14 @@ func TestConstraints(t *testing.T) { makeResourceList("1000", "1000Gi"), makeResourceList("0", "0"), makeSchedulingConfig(), - []queue.Queue{}, + []*api.Queue{}, )), "empty-queue-constraints": makeConstraintsTest(NewSchedulingConstraints( "pool-1", makeResourceList("1000", "1000Gi"), makeResourceList("0", "0"), makeSchedulingConfig(), - []queue.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{}}}, + []*api.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{}}}, )), "within-constraints": makeConstraintsTest(NewSchedulingConstraints( "pool-1", @@ -55,7 +51,7 @@ func TestConstraints(t *testing.T) { MaxQueueLookback: 1000, PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{"pool-1": {"cpu": 0.9, "memory": 0.9}}}}, }, - []queue.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{"priority-class-1": {MaximumResourceFraction: map[string]float64{"cpu": 0.9, "memory": 0.9}}}}}, + []*api.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{"priority-class-1": {MaximumResourceFraction: map[string]float64{"cpu": 0.9, "memory": 0.9}}}}}, )), "exceeds-queue-priority-class-constraint": func() *constraintTest { t := makeConstraintsTest(NewSchedulingConstraints( @@ -63,7 +59,7 @@ func TestConstraints(t *testing.T) { makeResourceList("1000", "1000Gi"), makeResourceList("0", "0"), makeSchedulingConfig(), - []queue.Queue{ + []*api.Queue{ { Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{ @@ -74,7 +70,7 @@ func TestConstraints(t *testing.T) { }, }, )) - t.expectedCheckConstraintsReason = "per-queue resource limit exceeded" + t.expectedCheckConstraintsReason = "resource limit exceeded" return t }(), "exceeds-queue-priority-class-pool-constraint": func() *constraintTest { @@ -83,7 +79,7 @@ func TestConstraints(t *testing.T) { makeResourceList("1000", "1000Gi"), makeResourceList("0", "0"), makeSchedulingConfig(), - []queue.Queue{ + []*api.Queue{ { Name: "queue-1", 
ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{ @@ -98,7 +94,7 @@ func TestConstraints(t *testing.T) { }, }, )) - t.expectedCheckConstraintsReason = "per-queue resource limit exceeded" + t.expectedCheckConstraintsReason = "resource limit exceeded" return t }(), "exceeds-priority-class-constraint": func() *constraintTest { @@ -111,7 +107,7 @@ func TestConstraints(t *testing.T) { MaxQueueLookback: 1000, PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{"pool-1": {"cpu": 0.00000001, "memory": 0.9}}}}, }, - []queue.Queue{}, + []*api.Queue{}, )) t.expectedCheckConstraintsReason = "resource limit exceeded" return t @@ -125,15 +121,40 @@ func TestConstraints(t *testing.T) { MaxQueueLookback: 1000, PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{"pool-1": {"cpu": 0.00000001, "memory": 0.9}}}}, }, - []queue.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{"priority-class-1": {MaximumResourceFraction: map[string]float64{"cpu": 0.9, "memory": 0.9}}}}}, + []*api.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{"priority-class-1": {MaximumResourceFraction: map[string]float64{"cpu": 0.9, "memory": 0.9}}}}}, )), + "one-constraint-per-level-falls-back-as-expected--within-limits": makeMultiLevelConstraintsTest( + map[string]resource.Quantity{"a": resource.MustParse("99"), "b": resource.MustParse("19"), "c": resource.MustParse("2.9"), "d": resource.MustParse("0.39")}, + "", + "", + ), + "one-constraint-per-level-falls-back-as-expected--a-exceeds-limits": makeMultiLevelConstraintsTest( + map[string]resource.Quantity{"a": resource.MustParse("101"), "b": resource.MustParse("19"), "c": resource.MustParse("2.9"), "d": resource.MustParse("0.39")}, + UnschedulableReasonMaximumResourcesExceeded, + "", + ), + "one-constraint-per-level-falls-back-as-expected--b-exceeds-limits": makeMultiLevelConstraintsTest( + map[string]resource.Quantity{"a": resource.MustParse("99"), "b": resource.MustParse("21"), "c": resource.MustParse("2.9"), "d": resource.MustParse("0.39")}, + UnschedulableReasonMaximumResourcesExceeded, + "", + ), + "one-constraint-per-level-falls-back-as-expected--c-exceeds-limits": makeMultiLevelConstraintsTest( + map[string]resource.Quantity{"a": resource.MustParse("99"), "b": resource.MustParse("19"), "c": resource.MustParse("3.1"), "d": resource.MustParse("0.39")}, + UnschedulableReasonMaximumResourcesExceeded, + "", + ), + "one-constraint-per-level-falls-back-as-expected--d-exceeds-limits": makeMultiLevelConstraintsTest( + map[string]resource.Quantity{"a": resource.MustParse("99"), "b": resource.MustParse("19"), "c": resource.MustParse("2.9"), "d": resource.MustParse("0.41")}, + UnschedulableReasonMaximumResourcesExceeded, + "", + ), "below-minimum-job-size": func() *constraintTest { t := makeConstraintsTest(NewSchedulingConstraints( "pool-1", makeResourceList("1000", "1000Gi"), makeResourceList("5", "1Mi"), makeSchedulingConfig(), - []queue.Queue{}, + []*api.Queue{}, )) t.expectedCheckConstraintsReason = "job requests 1 cpu, but the minimum is 5" return t @@ -147,7 +168,7 @@ func TestConstraints(t *testing.T) { MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.00001, "memory": 0.1}, MaxQueueLookback: 1000, }, - []queue.Queue{}, + []*api.Queue{}, )) 
t.expectedCheckRoundConstraintsReason = "maximum resources scheduled" return t @@ -168,6 +189,83 @@ func TestConstraints(t *testing.T) { } } +func makeMultiLevelConstraintsTest(requirements map[string]resource.Quantity, expectedCheckConstraintsReason string, expectedCheckRoundConstraintsReason string) *constraintTest { + zeroResources := schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{"a": resource.MustParse("0"), "b": resource.MustParse("0"), "c": resource.MustParse("0"), "d": resource.MustParse("0")}, + } + return &constraintTest{ + constraints: makeMultiLevelConstraints(), + sctx: &schedulercontext.SchedulingContext{ + Pool: "pool-1", + WeightSum: 100, + ScheduledResources: zeroResources.DeepCopy(), + Limiter: rate.NewLimiter(1e9, 1e6), + QueueSchedulingContexts: map[string]*schedulercontext.QueueSchedulingContext{ + "queue-1": { + Queue: "queue-1", + Weight: 1, + Limiter: rate.NewLimiter(1e9, 1e6), + Allocated: zeroResources.DeepCopy(), + AllocatedByPriorityClass: schedulerobjects.QuantityByTAndResourceType[string]{"priority-class-1": schedulerobjects.ResourceList{ + Resources: requirements, + }}, + }, + }, + Started: time.Now(), + }, + gctx: &schedulercontext.GangSchedulingContext{ + GangInfo: schedulercontext.GangInfo{ + PriorityClassName: "priority-class-1", + }, + Queue: "queue-1", + TotalResourceRequests: schedulerobjects.ResourceList{Resources: requirements}, + JobSchedulingContexts: []*schedulercontext.JobSchedulingContext{{}}, + }, + queue: "queue-1", + priorityClassName: "priority-class-1", + expectedCheckConstraintsReason: expectedCheckConstraintsReason, + expectedCheckRoundConstraintsReason: expectedCheckRoundConstraintsReason, + } +} + +func makeMultiLevelConstraints() SchedulingConstraints { + return NewSchedulingConstraints( + "pool-1", + schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"a": resource.MustParse("1000"), "b": resource.MustParse("1000"), "c": resource.MustParse("1000"), "d": resource.MustParse("1000")}}, + schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"a": resource.MustParse("0"), "b": resource.MustParse("0"), "c": resource.MustParse("0"), "d": resource.MustParse("0")}}, + configuration.SchedulingConfig{ + MaxQueueLookback: 1000, + PriorityClasses: map[string]types.PriorityClass{ + "priority-class-1": { + MaximumResourceFractionPerQueue: map[string]float64{ + "a": 0.0001, "b": 0.0002, "c": 0.0003, "d": 0.0004, + }, + MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{ + "pool-1": { + "a": 0.001, "b": 0.002, "c": 0.003, + }, + }, + }, + }, + }, + []*api.Queue{ + { + Name: "queue-1", + ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{ + "priority-class-1": { + MaximumResourceFraction: map[string]float64{"a": 0.01, "b": 0.02}, + MaximumResourceFractionByPool: map[string]api.PriorityClassPoolResourceLimits{ + "pool-1": { + MaximumResourceFraction: map[string]float64{"a": 0.1}, + }, + }, + }, + }, + }, + }, + ) +} + func TestScaleQuantity(t *testing.T) { tests := map[string]struct { input resource.Quantity @@ -231,6 +329,90 @@ func makeConstraintsTest(constraints SchedulingConstraints) *constraintTest { } } +func TestIsStrictlyLessOrEqual(t *testing.T) { + tests := map[string]struct { + a map[string]resource.Quantity + b map[string]resource.Quantity + expected bool + }{ + "both empty": { + a: make(map[string]resource.Quantity), + b: make(map[string]resource.Quantity), + expected: true, + }, + "zero and missing is equal": { + a: 
map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("0"), + }, + b: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + }, + expected: true, + }, + "simple equal": { + a: map[string]resource.Quantity{ + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "foo": resource.MustParse("3"), + }, + b: map[string]resource.Quantity{ + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "foo": resource.MustParse("3"), + }, + expected: true, + }, + "simple true": { + a: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("2"), + }, + b: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("3"), + }, + expected: true, + }, + "simple false": { + a: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("3"), + }, + b: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("2"), + }, + expected: false, + }, + "present in a missing in b true": { + a: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("2"), + }, + b: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + }, + expected: true, + }, + "missing in a present in b true": { + a: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + }, + b: map[string]resource.Quantity{ + "foo": resource.MustParse("1"), + "bar": resource.MustParse("2"), + }, + expected: true, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.expected, isStrictlyLessOrEqual(tc.a, tc.b)) + }) + } +} + func makeSchedulingConfig() configuration.SchedulingConfig { return configuration.SchedulingConfig{ MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.1, "memory": 0.1}, diff --git a/internal/scheduler/context/context.go b/internal/scheduler/context/context.go index f20b8c0cd34..865c96955e9 100644 --- a/internal/scheduler/context/context.go +++ b/internal/scheduler/context/context.go @@ -22,16 +22,11 @@ import ( "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/scheduler/fairness" "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" + "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -// defaultSchedulingKeyGenerator is used for computing scheduling keys for legacy api.Job where one is not pre-computed. -var defaultSchedulingKeyGenerator *schedulerobjects.SchedulingKeyGenerator - -func init() { - defaultSchedulingKeyGenerator = schedulerobjects.NewSchedulingKeyGenerator() -} - // SchedulingContext contains information necessary for scheduling and records what happened in a scheduling round. type SchedulingContext struct { // Time at which the scheduling cycle started. 
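The TestIsStrictlyLessOrEqual table added to constraints_test.go above pins down the comparison semantics: every resource named in b bounds the corresponding quantity in a, a quantity missing from a counts as zero, and resources present only in a are not compared. A minimal sketch consistent with that table follows; the helper name and loop shape are assumptions, and the real isStrictlyLessOrEqual in the constraints package may be implemented differently.

```go
package constraints

import "k8s.io/apimachinery/pkg/api/resource"

// isStrictlyLessOrEqual reports whether, for every resource named in b,
// the quantity in a (zero if absent) does not exceed the quantity in b.
// Resources present only in a are ignored, matching the test table above.
func isStrictlyLessOrEqual(a, b map[string]resource.Quantity) bool {
	for name, limit := range b {
		used := a[name] // zero Quantity when the resource is absent from a
		if used.Cmp(limit) > 0 {
			return false
		}
	}
	return true
}
```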
@@ -220,18 +215,16 @@ func (sctx *SchedulingContext) ReportString(verbosity int32) string { func (sctx *SchedulingContext) AddGangSchedulingContext(gctx *GangSchedulingContext) (bool, error) { allJobsEvictedInThisRound := true - numberOfSuccessfulJobs := 0 + allJobsSuccessful := true for _, jctx := range gctx.JobSchedulingContexts { evictedInThisRound, err := sctx.AddJobSchedulingContext(jctx) if err != nil { return false, err } allJobsEvictedInThisRound = allJobsEvictedInThisRound && evictedInThisRound - if jctx.IsSuccessful() { - numberOfSuccessfulJobs++ - } + allJobsSuccessful = allJobsSuccessful && jctx.IsSuccessful() } - if numberOfSuccessfulJobs >= gctx.GangInfo.MinimumCardinality && !allJobsEvictedInThisRound { + if allJobsSuccessful && !allJobsEvictedInThisRound { sctx.NumScheduledGangs++ } return allJobsEvictedInThisRound, nil @@ -240,9 +233,9 @@ func (sctx *SchedulingContext) AddGangSchedulingContext(gctx *GangSchedulingCont // AddJobSchedulingContext adds a job scheduling context. // Automatically updates scheduled resources. func (sctx *SchedulingContext) AddJobSchedulingContext(jctx *JobSchedulingContext) (bool, error) { - qctx, ok := sctx.QueueSchedulingContexts[jctx.Job.GetQueue()] + qctx, ok := sctx.QueueSchedulingContexts[jctx.Job.Queue()] if !ok { - return false, errors.Errorf("failed adding job %s to scheduling context: no context for queue %s", jctx.JobId, jctx.Job.GetQueue()) + return false, errors.Errorf("failed adding job %s to scheduling context: no context for queue %s", jctx.JobId, jctx.Job.Queue()) } evictedInThisRound, err := qctx.AddJobSchedulingContext(jctx) if err != nil { @@ -251,18 +244,18 @@ func (sctx *SchedulingContext) AddJobSchedulingContext(jctx *JobSchedulingContex if jctx.IsSuccessful() { if evictedInThisRound { sctx.EvictedResources.SubV1ResourceList(jctx.PodRequirements.ResourceRequirements.Requests) - sctx.EvictedResourcesByPriorityClass.SubV1ResourceList(jctx.Job.GetPriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) + sctx.EvictedResourcesByPriorityClass.SubV1ResourceList(jctx.Job.PriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) sctx.NumEvictedJobs-- } else { sctx.ScheduledResources.AddV1ResourceList(jctx.PodRequirements.ResourceRequirements.Requests) - sctx.ScheduledResourcesByPriorityClass.AddV1ResourceList(jctx.Job.GetPriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) + sctx.ScheduledResourcesByPriorityClass.AddV1ResourceList(jctx.Job.PriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) sctx.NumScheduledJobs++ } } return evictedInThisRound, nil } -func (sctx *SchedulingContext) EvictGang(jobs []interfaces.LegacySchedulerJob) (bool, error) { +func (sctx *SchedulingContext) EvictGang(jobs []*jobdb.Job) (bool, error) { allJobsScheduledInThisRound := true for _, job := range jobs { scheduledInThisRound, err := sctx.EvictJob(job) @@ -277,23 +270,23 @@ func (sctx *SchedulingContext) EvictGang(jobs []interfaces.LegacySchedulerJob) ( return allJobsScheduledInThisRound, nil } -func (sctx *SchedulingContext) EvictJob(job interfaces.LegacySchedulerJob) (bool, error) { - qctx, ok := sctx.QueueSchedulingContexts[job.GetQueue()] +func (sctx *SchedulingContext) EvictJob(job *jobdb.Job) (bool, error) { + qctx, ok := sctx.QueueSchedulingContexts[job.Queue()] if !ok { - return false, errors.Errorf("failed evicting job %s from scheduling context: no context for queue %s", job.GetId(), job.GetQueue()) + return false, errors.Errorf("failed evicting job %s from scheduling 
context: no context for queue %s", job.Id(), job.Queue()) } scheduledInThisRound, err := qctx.EvictJob(job) if err != nil { return false, err } - rl := job.GetResourceRequirements().Requests + rl := job.ResourceRequirements().Requests if scheduledInThisRound { sctx.ScheduledResources.SubV1ResourceList(rl) - sctx.ScheduledResourcesByPriorityClass.SubV1ResourceList(job.GetPriorityClassName(), rl) + sctx.ScheduledResourcesByPriorityClass.SubV1ResourceList(job.PriorityClassName(), rl) sctx.NumScheduledJobs-- } else { sctx.EvictedResources.AddV1ResourceList(rl) - sctx.EvictedResourcesByPriorityClass.AddV1ResourceList(job.GetPriorityClassName(), rl) + sctx.EvictedResourcesByPriorityClass.AddV1ResourceList(job.PriorityClassName(), rl) sctx.NumEvictedJobs++ } return scheduledInThisRound, nil @@ -364,13 +357,6 @@ type QueueSchedulingContext struct { EvictedJobsById map[string]bool } -func GetSchedulingContextFromQueueSchedulingContext(qctx *QueueSchedulingContext) *SchedulingContext { - if qctx == nil { - return nil - } - return qctx.SchedulingContext -} - func (qctx *QueueSchedulingContext) String() string { return qctx.ReportString(0) } @@ -481,16 +467,16 @@ func (qctx *QueueSchedulingContext) AddJobSchedulingContext(jctx *JobSchedulingC // Always update ResourcesByPriority. // Since ResourcesByPriority is used to order queues by fraction of fair share. qctx.Allocated.AddV1ResourceList(jctx.PodRequirements.ResourceRequirements.Requests) - qctx.AllocatedByPriorityClass.AddV1ResourceList(jctx.Job.GetPriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) + qctx.AllocatedByPriorityClass.AddV1ResourceList(jctx.Job.PriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) // Only if the job is not evicted, update ScheduledResourcesByPriority. // Since ScheduledResourcesByPriority is used to control per-round scheduling constraints. 
if evictedInThisRound { delete(qctx.EvictedJobsById, jctx.JobId) - qctx.EvictedResourcesByPriorityClass.SubV1ResourceList(jctx.Job.GetPriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) + qctx.EvictedResourcesByPriorityClass.SubV1ResourceList(jctx.Job.PriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) } else { qctx.SuccessfulJobSchedulingContexts[jctx.JobId] = jctx - qctx.ScheduledResourcesByPriorityClass.AddV1ResourceList(jctx.Job.GetPriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) + qctx.ScheduledResourcesByPriorityClass.AddV1ResourceList(jctx.Job.PriorityClassName(), jctx.PodRequirements.ResourceRequirements.Requests) } } else { qctx.UnsuccessfulJobSchedulingContexts[jctx.JobId] = jctx @@ -498,25 +484,25 @@ func (qctx *QueueSchedulingContext) AddJobSchedulingContext(jctx *JobSchedulingC return evictedInThisRound, nil } -func (qctx *QueueSchedulingContext) EvictJob(job interfaces.LegacySchedulerJob) (bool, error) { - jobId := job.GetId() +func (qctx *QueueSchedulingContext) EvictJob(job *jobdb.Job) (bool, error) { + jobId := job.Id() if _, ok := qctx.UnsuccessfulJobSchedulingContexts[jobId]; ok { return false, errors.Errorf("failed evicting job %s from queue: job already marked unsuccessful", jobId) } if _, ok := qctx.EvictedJobsById[jobId]; ok { return false, errors.Errorf("failed evicting job %s from queue: job already marked evicted", jobId) } - rl := job.GetResourceRequirements().Requests + rl := job.ResourceRequirements().Requests _, scheduledInThisRound := qctx.SuccessfulJobSchedulingContexts[jobId] if scheduledInThisRound { - qctx.ScheduledResourcesByPriorityClass.SubV1ResourceList(job.GetPriorityClassName(), rl) + qctx.ScheduledResourcesByPriorityClass.SubV1ResourceList(job.PriorityClassName(), rl) delete(qctx.SuccessfulJobSchedulingContexts, jobId) } else { - qctx.EvictedResourcesByPriorityClass.AddV1ResourceList(job.GetPriorityClassName(), rl) + qctx.EvictedResourcesByPriorityClass.AddV1ResourceList(job.PriorityClassName(), rl) qctx.EvictedJobsById[jobId] = true } qctx.Allocated.SubV1ResourceList(rl) - qctx.AllocatedByPriorityClass.SubV1ResourceList(job.GetPriorityClassName(), rl) + qctx.AllocatedByPriorityClass.SubV1ResourceList(job.PriorityClassName(), rl) return scheduledInThisRound, nil } @@ -551,7 +537,7 @@ func NewGangSchedulingContext(jctxs []*JobSchedulingContext) *GangSchedulingCont representative := jctxs[0] return &GangSchedulingContext{ Created: time.Now(), - Queue: representative.Job.GetQueue(), + Queue: representative.Job.Queue(), GangInfo: representative.GangInfo, JobSchedulingContexts: jctxs, TotalResourceRequests: totalResourceRequests, @@ -613,10 +599,12 @@ type JobSchedulingContext struct { // Indicates whether this context is for re-scheduling an evicted job. IsEvicted bool // Job spec. - Job interfaces.LegacySchedulerJob + Job *jobdb.Job // Scheduling requirements of this job. // We currently require that each job contains exactly one pod spec. PodRequirements *schedulerobjects.PodRequirements + // Resource requirements in an efficient internaltypes.ResourceList + ResourceRequirements internaltypes.ResourceList // Node selectors to consider in addition to those included with the PodRequirements. // These are added as part of scheduling to further constrain where nodes are scheduled, // e.g., to ensure evicted jobs are re-scheduled onto the same node. 
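The ResourceRequirements field added above holds the internaltypes.ResourceList introduced later in this patch. As a rough orientation, here is a minimal sketch of how that API fits together, assuming the constructors and package paths shown in the new internaltypes files below; the scheduler's real wiring (via jobdb and the node database) is more involved.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/armadaproject/armada/internal/scheduler/configuration"
	"github.com/armadaproject/armada/internal/scheduler/internaltypes"
)

func main() {
	// One factory fixes the set of supported resource types and their resolutions.
	factory, err := internaltypes.MakeResourceListFactory([]configuration.ResourceType{
		{Name: "cpu", Resolution: resource.MustParse("1m")},
		{Name: "memory", Resolution: resource.MustParse("1")},
	})
	if err != nil {
		panic(err)
	}

	// Per the doc comments below: node capacity is rounded down, job requests are rounded up.
	available := factory.FromNodeProto(map[string]resource.Quantity{
		"cpu":    resource.MustParse("16"),
		"memory": resource.MustParse("32Gi"),
	})
	requested := factory.FromJobResourceListIgnoreUnknown(map[string]resource.Quantity{
		"cpu":    resource.MustParse("17"),
		"memory": resource.MustParse("4Gi"),
	})

	// ExceedsAvailable reports a resource that exceeds availability, with the available and requested amounts.
	if name, have, want, exceeds := requested.ExceedsAvailable(available); exceeds {
		fmt.Printf("does not fit: %s available=%s requested=%s\n", name, have.String(), want.String())
	}
}
```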
@@ -635,8 +623,6 @@ type JobSchedulingContext struct { // GangInfo holds all the information that is necessary to schedule a gang, // such as the lower and upper bounds on its size. GangInfo - // If set, indicates this job should be failed back to the client when the gang is scheduled. - ShouldFail bool } func (jctx *JobSchedulingContext) String() string { @@ -663,11 +649,7 @@ func (jctx *JobSchedulingContext) SchedulingKey() (schedulerobjects.SchedulingKe if len(jctx.AdditionalNodeSelectors) != 0 || len(jctx.AdditionalTolerations) != 0 { return schedulerobjects.EmptySchedulingKey, false } - schedulingKey, ok := jctx.Job.GetSchedulingKey() - if !ok { - schedulingKey = interfaces.SchedulingKeyFromLegacySchedulerJob(defaultSchedulingKeyGenerator, jctx.Job) - } - return schedulingKey, true + return jctx.Job.SchedulingKey(), true } func (jctx *JobSchedulingContext) IsSuccessful() bool { @@ -699,31 +681,29 @@ func (jctx *JobSchedulingContext) GetNodeSelector(key string) (string, bool) { } type GangInfo struct { - Id string - Cardinality int - MinimumCardinality int - PriorityClassName string - NodeUniformity string + Id string + Cardinality int + PriorityClassName string + NodeUniformity string } // EmptyGangInfo returns a GangInfo for a job that is not in a gang. func EmptyGangInfo(job interfaces.MinimalJob) GangInfo { return GangInfo{ // An Id of "" indicates that this job is not in a gang; we set - // Cardinality and MinimumCardinality (as well as the other fields, + // Cardinality (as well as the other fields, // which all make sense in this context) accordingly. - Id: "", - Cardinality: 1, - MinimumCardinality: 1, - PriorityClassName: job.GetPriorityClassName(), - NodeUniformity: job.GetAnnotations()[configuration.GangNodeUniformityLabelAnnotation], + Id: "", + Cardinality: 1, + PriorityClassName: job.PriorityClassName(), + NodeUniformity: job.Annotations()[configuration.GangNodeUniformityLabelAnnotation], } } func GangInfoFromLegacySchedulerJob(job interfaces.MinimalJob) (GangInfo, error) { gangInfo := EmptyGangInfo(job) - annotations := job.GetAnnotations() + annotations := job.Annotations() gangId, ok := annotations[configuration.GangIdAnnotation] if !ok { @@ -745,48 +725,31 @@ func GangInfoFromLegacySchedulerJob(job interfaces.MinimalJob) (GangInfo, error) return gangInfo, errors.Errorf("gang cardinality %d is non-positive", gangCardinality) } - gangMinimumCardinalityString, ok := annotations[configuration.GangMinimumCardinalityAnnotation] - if !ok { - // If it is not set, use gangCardinality as the minimum gang size. 
- gangMinimumCardinalityString = gangCardinalityString - } - gangMinimumCardinality, err := strconv.Atoi(gangMinimumCardinalityString) - if err != nil { - return gangInfo, errors.WithStack(err) - } - if gangMinimumCardinality <= 0 { - return gangInfo, errors.Errorf("gang minimum cardinality %d is non-positive", gangMinimumCardinality) - } - if gangMinimumCardinality > gangCardinality { - return gangInfo, errors.Errorf("gang minimum cardinality %d is greater than gang cardinality %d", gangMinimumCardinality, gangCardinality) - } - gangInfo.Id = gangId gangInfo.Cardinality = gangCardinality - gangInfo.MinimumCardinality = gangMinimumCardinality return gangInfo, nil } -func JobSchedulingContextsFromJobs[J interfaces.LegacySchedulerJob](priorityClasses map[string]types.PriorityClass, jobs []J) []*JobSchedulingContext { +func JobSchedulingContextsFromJobs[J *jobdb.Job](priorityClasses map[string]types.PriorityClass, jobs []J) []*JobSchedulingContext { jctxs := make([]*JobSchedulingContext, len(jobs)) for i, job := range jobs { - jctxs[i] = JobSchedulingContextFromJob(priorityClasses, job) + jctxs[i] = JobSchedulingContextFromJob(job) } return jctxs } -func JobSchedulingContextFromJob(priorityClasses map[string]types.PriorityClass, job interfaces.LegacySchedulerJob) *JobSchedulingContext { +func JobSchedulingContextFromJob(job *jobdb.Job) *JobSchedulingContext { gangInfo, err := GangInfoFromLegacySchedulerJob(job) if err != nil { - logrus.Errorf("failed to extract gang info from job %s: %s", job.GetId(), err) + logrus.Errorf("failed to extract gang info from job %s: %s", job.Id(), err) } return &JobSchedulingContext{ - Created: time.Now(), - JobId: job.GetId(), - Job: job, - PodRequirements: job.GetPodRequirements(priorityClasses), - GangInfo: gangInfo, - ShouldFail: false, + Created: time.Now(), + JobId: job.Id(), + Job: job, + PodRequirements: job.PodRequirements(), + ResourceRequirements: job.EfficientResourceRequirements(), + GangInfo: gangInfo, } } diff --git a/internal/scheduler/context/context_test.go b/internal/scheduler/context/context_test.go index 61cf9c08322..e538f966986 100644 --- a/internal/scheduler/context/context_test.go +++ b/internal/scheduler/context/context_test.go @@ -89,9 +89,10 @@ func testNSmallCpuJobSchedulingContext(queue, priorityClassName string, n int) [ func testSmallCpuJobSchedulingContext(queue, priorityClassName string) *JobSchedulingContext { job := testfixtures.Test1Cpu4GiJob(queue, priorityClassName) return &JobSchedulingContext{ - JobId: job.GetId(), - Job: job, - PodRequirements: job.GetPodRequirements(testfixtures.TestPriorityClasses), - GangInfo: EmptyGangInfo(job), + JobId: job.Id(), + Job: job, + PodRequirements: job.PodRequirements(), + ResourceRequirements: job.EfficientResourceRequirements(), + GangInfo: EmptyGangInfo(job), } } diff --git a/internal/scheduler/database/db_pruner.go b/internal/scheduler/database/db_pruner.go index 8da7dd7935d..b6b8ae2501f 100644 --- a/internal/scheduler/database/db_pruner.go +++ b/internal/scheduler/database/db_pruner.go @@ -5,7 +5,7 @@ import ( "github.com/jackc/pgx/v5" "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/armadacontext" ) diff --git a/internal/scheduler/database/db_pruner_test.go b/internal/scheduler/database/db_pruner_test.go index 1a30c200463..7c19347d109 100644 --- a/internal/scheduler/database/db_pruner_test.go +++ b/internal/scheduler/database/db_pruner_test.go @@ -9,10 +9,11 @@ import ( 
"github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/database" + armadaslices "github.com/armadaproject/armada/internal/common/slices" commonutil "github.com/armadaproject/armada/internal/common/util" ) @@ -113,7 +114,7 @@ func TestPruneDb_RemoveJobs(t *testing.T) { testClock := clock.NewFakeClock(baseTime) // Set up db - jobsToInsert := commonutil.Map(tc.jobs, populateRequiredJobFields) + jobsToInsert := armadaslices.Map(tc.jobs, populateRequiredJobFields) err := removeTriggers(ctx, db) require.NoError(t, err) err = database.UpsertWithTransaction(ctx, db, "jobs", jobsToInsert) diff --git a/internal/scheduler/database/job_repository.go b/internal/scheduler/database/job_repository.go index 630b41ef726..4cc77ed88d6 100644 --- a/internal/scheduler/database/job_repository.go +++ b/internal/scheduler/database/job_repository.go @@ -160,6 +160,7 @@ func (r *PostgresJobRepository) FetchJobUpdates(ctx *armadacontext.Context, jobS Queue: row.Queue, Priority: row.Priority, Submitted: row.Submitted, + Validated: row.Validated, Queued: row.Queued, QueuedVersion: row.QueuedVersion, CancelRequested: row.CancelRequested, @@ -170,6 +171,7 @@ func (r *PostgresJobRepository) FetchJobUpdates(ctx *armadacontext.Context, jobS SchedulingInfo: row.SchedulingInfo, SchedulingInfoVersion: row.SchedulingInfoVersion, Serial: row.Serial, + Pools: row.Pools, } } diff --git a/internal/scheduler/database/migrations/009_add_validated.sql b/internal/scheduler/database/migrations/009_add_validated.sql new file mode 100644 index 00000000000..e4813d0594e --- /dev/null +++ b/internal/scheduler/database/migrations/009_add_validated.sql @@ -0,0 +1 @@ +ALTER TABLE jobs ADD COLUMN validated boolean NOT NULL DEFAULT false; diff --git a/internal/scheduler/database/migrations/010_set_validated_true.sql b/internal/scheduler/database/migrations/010_set_validated_true.sql new file mode 100644 index 00000000000..ba3b940723d --- /dev/null +++ b/internal/scheduler/database/migrations/010_set_validated_true.sql @@ -0,0 +1 @@ +update jobs set validated = true diff --git a/internal/scheduler/database/migrations/011_add_pools.sql b/internal/scheduler/database/migrations/011_add_pools.sql new file mode 100644 index 00000000000..08c746e4e27 --- /dev/null +++ b/internal/scheduler/database/migrations/011_add_pools.sql @@ -0,0 +1 @@ +ALTER TABLE jobs ADD COLUMN pools text[]; diff --git a/internal/scheduler/database/models.go b/internal/scheduler/database/models.go index a48323bc970..bb39956db63 100644 --- a/internal/scheduler/database/models.go +++ b/internal/scheduler/database/models.go @@ -36,6 +36,8 @@ type Job struct { SchedulingInfoVersion int32 `db:"scheduling_info_version"` Serial int64 `db:"serial"` LastModified time.Time `db:"last_modified"` + Validated bool `db:"validated"` + Pools []string `db:"pools"` } type JobRunError struct { diff --git a/internal/scheduler/database/query.sql.go b/internal/scheduler/database/query.sql.go index c2a904c06a7..9851043f7e2 100644 --- a/internal/scheduler/database/query.sql.go +++ b/internal/scheduler/database/query.sql.go @@ -133,11 +133,17 @@ func (q *Queries) MarkJobRunsSucceededById(ctx context.Context, runIds []uuid.UU } const markJobsCancelRequestedById = `-- name: MarkJobsCancelRequestedById :exec -UPDATE jobs SET cancel_requested = true WHERE job_id = 
ANY($1::text[]) +UPDATE jobs SET cancel_requested = true WHERE queue = $1 and job_set = $2 and job_id = ANY($3::text[]) ` -func (q *Queries) MarkJobsCancelRequestedById(ctx context.Context, jobIds []string) error { - _, err := q.db.Exec(ctx, markJobsCancelRequestedById, jobIds) +type MarkJobsCancelRequestedByIdParams struct { + Queue string `db:"queue"` + JobSet string `db:"job_set"` + JobIds []string `db:"job_ids"` +} + +func (q *Queries) MarkJobsCancelRequestedById(ctx context.Context, arg MarkJobsCancelRequestedByIdParams) error { + _, err := q.db.Exec(ctx, markJobsCancelRequestedById, arg.Queue, arg.JobSet, arg.JobIds) return err } @@ -393,7 +399,7 @@ func (q *Queries) SelectJobsForExecutor(ctx context.Context, arg SelectJobsForEx } const selectNewJobs = `-- name: SelectNewJobs :many -SELECT job_id, job_set, queue, user_id, submitted, groups, priority, queued, queued_version, cancel_requested, cancelled, cancel_by_jobset_requested, succeeded, failed, submit_message, scheduling_info, scheduling_info_version, serial, last_modified FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 +SELECT job_id, job_set, queue, user_id, submitted, groups, priority, queued, queued_version, cancel_requested, cancelled, cancel_by_jobset_requested, succeeded, failed, submit_message, scheduling_info, scheduling_info_version, serial, last_modified, validated, pools FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 ` type SelectNewJobsParams struct { @@ -430,6 +436,8 @@ func (q *Queries) SelectNewJobs(ctx context.Context, arg SelectNewJobsParams) ([ &i.SchedulingInfoVersion, &i.Serial, &i.LastModified, + &i.Validated, + &i.Pools, ); err != nil { return nil, err } @@ -577,7 +585,7 @@ func (q *Queries) SelectRunErrorsById(ctx context.Context, runIds []uuid.UUID) ( } const selectUpdatedJobs = `-- name: SelectUpdatedJobs :many -SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 +SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 ` type SelectUpdatedJobsParams struct { @@ -586,21 +594,23 @@ type SelectUpdatedJobsParams struct { } type SelectUpdatedJobsRow struct { - JobID string `db:"job_id"` - JobSet string `db:"job_set"` - Queue string `db:"queue"` - Priority int64 `db:"priority"` - Submitted int64 `db:"submitted"` - Queued bool `db:"queued"` - QueuedVersion int32 `db:"queued_version"` - CancelRequested bool `db:"cancel_requested"` - CancelByJobsetRequested bool `db:"cancel_by_jobset_requested"` - Cancelled bool `db:"cancelled"` - Succeeded bool `db:"succeeded"` - Failed bool `db:"failed"` - SchedulingInfo []byte `db:"scheduling_info"` - SchedulingInfoVersion int32 `db:"scheduling_info_version"` - Serial int64 `db:"serial"` + JobID string `db:"job_id"` + JobSet string `db:"job_set"` + Queue string `db:"queue"` + Priority int64 `db:"priority"` + Submitted int64 `db:"submitted"` + Queued bool `db:"queued"` + QueuedVersion int32 `db:"queued_version"` + Validated bool `db:"validated"` + CancelRequested bool `db:"cancel_requested"` + CancelByJobsetRequested bool `db:"cancel_by_jobset_requested"` + Cancelled bool `db:"cancelled"` + Succeeded bool `db:"succeeded"` + Failed bool `db:"failed"` + 
SchedulingInfo []byte `db:"scheduling_info"` + SchedulingInfoVersion int32 `db:"scheduling_info_version"` + Pools []string `db:"pools"` + Serial int64 `db:"serial"` } func (q *Queries) SelectUpdatedJobs(ctx context.Context, arg SelectUpdatedJobsParams) ([]SelectUpdatedJobsRow, error) { @@ -620,6 +630,7 @@ func (q *Queries) SelectUpdatedJobs(ctx context.Context, arg SelectUpdatedJobsPa &i.Submitted, &i.Queued, &i.QueuedVersion, + &i.Validated, &i.CancelRequested, &i.CancelByJobsetRequested, &i.Cancelled, @@ -627,6 +638,7 @@ func (q *Queries) SelectUpdatedJobs(ctx context.Context, arg SelectUpdatedJobsPa &i.Failed, &i.SchedulingInfo, &i.SchedulingInfoVersion, + &i.Pools, &i.Serial, ); err != nil { return nil, err @@ -696,16 +708,23 @@ func (q *Queries) SetTerminatedTime(ctx context.Context, arg SetTerminatedTimePa } const updateJobPriorityById = `-- name: UpdateJobPriorityById :exec -UPDATE jobs SET priority = $1 WHERE job_id = $2 +UPDATE jobs SET priority = $1 WHERE queue = $2 and job_set = $3 and job_id = ANY($4::text[]) ` type UpdateJobPriorityByIdParams struct { - Priority int64 `db:"priority"` - JobID string `db:"job_id"` + Priority int64 `db:"priority"` + Queue string `db:"queue"` + JobSet string `db:"job_set"` + JobIds []string `db:"job_ids"` } func (q *Queries) UpdateJobPriorityById(ctx context.Context, arg UpdateJobPriorityByIdParams) error { - _, err := q.db.Exec(ctx, updateJobPriorityById, arg.Priority, arg.JobID) + _, err := q.db.Exec(ctx, updateJobPriorityById, + arg.Priority, + arg.Queue, + arg.JobSet, + arg.JobIds, + ) return err } diff --git a/internal/scheduler/database/query/query.sql b/internal/scheduler/database/query/query.sql index 4b3dc424b27..91504e1ea84 100644 --- a/internal/scheduler/database/query/query.sql +++ b/internal/scheduler/database/query/query.sql @@ -5,7 +5,7 @@ SELECT * FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2; SELECT job_id FROM jobs; -- name: SelectUpdatedJobs :many -SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2; +SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2; -- name: UpdateJobPriorityByJobSet :exec UPDATE jobs SET priority = $1 WHERE job_set = $2 and queue = $3; @@ -17,7 +17,7 @@ UPDATE jobs SET cancel_by_jobset_requested = true WHERE job_set = sqlc.arg(job_s UPDATE jobs SET succeeded = true WHERE job_id = ANY(sqlc.arg(job_ids)::text[]); -- name: MarkJobsCancelRequestedById :exec -UPDATE jobs SET cancel_requested = true WHERE job_id = ANY(sqlc.arg(job_ids)::text[]); +UPDATE jobs SET cancel_requested = true WHERE queue = sqlc.arg(queue) and job_set = sqlc.arg(job_set) and job_id = ANY(sqlc.arg(job_ids)::text[]); -- name: MarkJobsCancelledById :exec UPDATE jobs SET cancelled = true WHERE job_id = ANY(sqlc.arg(job_ids)::text[]); @@ -26,7 +26,7 @@ UPDATE jobs SET cancelled = true WHERE job_id = ANY(sqlc.arg(job_ids)::text[]); UPDATE jobs SET failed = true WHERE job_id = ANY(sqlc.arg(job_ids)::text[]); -- name: UpdateJobPriorityById :exec -UPDATE jobs SET priority = $1 WHERE job_id = $2; +UPDATE jobs SET priority = $1 WHERE queue = sqlc.arg(queue) and job_set = sqlc.arg(job_set) and job_id = 
ANY(sqlc.arg(job_ids)::text[]); -- name: SelectNewRuns :many SELECT * FROM runs WHERE serial > $1 ORDER BY serial LIMIT $2; diff --git a/internal/scheduler/gang_scheduler.go b/internal/scheduler/gang_scheduler.go index 3dd21513e9d..5fb28d49fbe 100644 --- a/internal/scheduler/gang_scheduler.go +++ b/internal/scheduler/gang_scheduler.go @@ -7,10 +7,10 @@ import ( "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/common/slices" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -63,7 +63,7 @@ func (sch *GangScheduler) updateGangSchedulingContextOnSuccess(gctx *schedulerco func (sch *GangScheduler) updateGangSchedulingContextOnFailure(gctx *schedulercontext.GangSchedulingContext, gangAddedToSchedulingContext bool, unschedulableReason string) error { // If the job was added to the context, remove it first. if gangAddedToSchedulingContext { - failedJobs := util.Map(gctx.JobSchedulingContexts, func(jctx *schedulercontext.JobSchedulingContext) interfaces.LegacySchedulerJob { return jctx.Job }) + failedJobs := slices.Map(gctx.JobSchedulingContexts, func(jctx *schedulercontext.JobSchedulingContext) *jobdb.Job { return jctx.Job }) if _, err := sch.schedulingContext.EvictGang(failedJobs); err != nil { return err } @@ -226,18 +226,9 @@ func (sch *GangScheduler) tryScheduleGangWithTxn(_ *armadacontext.Context, txn * } else { unschedulableReason = "job does not fit on any node" } - } else { - // When a gang schedules successfully, update state for failed jobs if they exist. 
- for _, jctx := range gctx.JobSchedulingContexts { - if jctx.ShouldFail { - jctx.Fail("job does not fit on any node") - } - } } - return } - return } diff --git a/internal/scheduler/gang_scheduler_test.go b/internal/scheduler/gang_scheduler_test.go index e72453ecc16..67c2086baae 100644 --- a/internal/scheduler/gang_scheduler_test.go +++ b/internal/scheduler/gang_scheduler_test.go @@ -11,12 +11,12 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/fairness" @@ -69,32 +69,6 @@ func TestGangScheduler(t *testing.T) { ExpectedCumulativeScheduledJobs: []int{0}, ExpectedRuntimeGangCardinality: []int{0}, }, - "simple success where min cardinality is met": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 32, - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), - ), - }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedCumulativeScheduledJobs: []int{32}, - ExpectedRuntimeGangCardinality: []int{32}, - }, - "simple failure where min cardinality is not met": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 33, - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), - ), - }, - ExpectedScheduledIndices: nil, - ExpectedCumulativeScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, - }, "one success and one failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), @@ -106,20 +80,6 @@ func TestGangScheduler(t *testing.T) { ExpectedCumulativeScheduledJobs: []int{32, 32}, ExpectedRuntimeGangCardinality: []int{32, 0}, }, - "one success and one failure using min cardinality": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 32, - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), - ), - testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), - }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedCumulativeScheduledJobs: []int{32, 32}, - ExpectedRuntimeGangCardinality: []int{32, 0}, - }, "multiple nodes": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), @@ -212,7 +172,7 @@ func TestGangScheduler(t *testing.T) { }, "resolution has no impact on jobs of size a multiple of the resolution": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( - []configuration.IndexedResource{ + 
[]configuration.ResourceType{ {Name: "cpu", Resolution: resource.MustParse("16")}, {Name: "memory", Resolution: resource.MustParse("128Mi")}, }, @@ -233,7 +193,7 @@ func TestGangScheduler(t *testing.T) { }, "jobs of size not a multiple of the resolution blocks scheduling new jobs": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( - []configuration.IndexedResource{ + []configuration.ResourceType{ {Name: "cpu", Resolution: resource.MustParse("17")}, {Name: "memory", Resolution: resource.MustParse("128Mi")}, }, @@ -252,10 +212,10 @@ func TestGangScheduler(t *testing.T) { }, "consider all nodes in the bucket": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( - []configuration.IndexedResource{ + []configuration.ResourceType{ {Name: "cpu", Resolution: resource.MustParse("1")}, {Name: "memory", Resolution: resource.MustParse("1Mi")}, - {Name: "gpu", Resolution: resource.MustParse("1")}, + {Name: "nvidia.com/gpu", Resolution: resource.MustParse("1")}, }, testfixtures.TestSchedulingConfig(), ), @@ -264,9 +224,9 @@ func TestGangScheduler(t *testing.T) { 0, schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31.5"), - "memory": resource.MustParse("512Gi"), - "gpu": resource.MustParse("8"), + "cpu": resource.MustParse("31.5"), + "memory": resource.MustParse("512Gi"), + "nvidia.com/gpu": resource.MustParse("8"), }, }, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), @@ -400,11 +360,10 @@ func TestGangScheduler(t *testing.T) { )..., ), Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 2, + testfixtures.WithGangAnnotationsJobs( testfixtures.WithNodeUniformityLabelAnnotationJobs( "my-cool-node-uniformity", - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 4), + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 3), ), ), }, @@ -413,43 +372,6 @@ func TestGangScheduler(t *testing.T) { ExpectedNodeUniformity: map[int]string{0: "b"}, ExpectedRuntimeGangCardinality: []int{3}, }, - "NodeUniformityLabel PreemptedAtPriority tiebreak": { - SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( - []string{"my-cool-node-uniformity"}, - testfixtures.TestSchedulingConfig(), - ), - Nodes: append( - testfixtures.WithUsedResourcesNodes( - 1, - schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")}}, - testfixtures.WithLabelsNodes( - map[string]string{"my-cool-node-uniformity": "a"}, - testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - ), - ), - testfixtures.WithUsedResourcesNodes( - 0, - schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")}}, - testfixtures.WithLabelsNodes( - map[string]string{"my-cool-node-uniformity": "b"}, - testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - ), - )..., - ), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 2, - testfixtures.WithNodeUniformityLabelAnnotationJobs( - "my-cool-node-uniformity", - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass2, 4), - ), - ), - }, - ExpectedScheduledIndices: []int{0}, - ExpectedCumulativeScheduledJobs: []int{2}, - ExpectedNodeUniformity: map[int]string{0: "b"}, - ExpectedRuntimeGangCardinality: []int{2}, - }, "AwayNodeTypes": { SchedulingConfig: func() configuration.SchedulingConfig { config := testfixtures.TestSchedulingConfig() @@ -509,7 +431,10 @@ func TestGangScheduler(t *testing.T) { Gangs: func() (gangs [][]*jobdb.Job) { var jobId ulid.ULID jobId = 
util.ULID() - gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away", testfixtures.Test1Cpu4GiPodReqs("A", jobId, 30000))}) + gangs = append(gangs, []*jobdb.Job{ + testfixtures. + TestJob("A", jobId, "armada-preemptible-away", testfixtures.Test1Cpu4GiPodReqs("A", jobId, 30000)), + }) jobId = util.ULID() gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away-both", testfixtures.Test1Cpu4GiPodReqs("A", jobId, 30000))}) return @@ -568,18 +493,28 @@ } for name, tc := range tests { t.Run(name, func(t *testing.T) { + // This is hacktabulous. Essentially the jobs have the wrong priority classes set at this point + // because testfixtures.TestJob() initialises the jobs using a jobDb that doesn't know anything about the + // priority classes we have defined in this test. We therefore need to fix the priority classes here. + // The long term strategy is to try to remove the need to have a jobDB for creating jobs. + for i, gang := range tc.Gangs { + for j, job := range gang { + tc.Gangs[i][j] = job.WithPriorityClass(tc.SchedulingConfig.PriorityClasses[job.PriorityClassName()]) + } + } + nodesById := make(map[string]*schedulerobjects.Node, len(tc.Nodes)) for _, node := range tc.Nodes { nodesById[node.Id] = node } nodeDb, err := nodedb.NewNodeDb( tc.SchedulingConfig.PriorityClasses, - tc.SchedulingConfig.MaxExtraNodesToConsider, tc.SchedulingConfig.IndexedResources, tc.SchedulingConfig.IndexedTaints, tc.SchedulingConfig.IndexedNodeLabels, tc.SchedulingConfig.WellKnownNodeTypes, stringinterner.New(1024), + testfixtures.TestResourceListFactory, ) require.NoError(t, err) txn := nodeDb.Txn(true) @@ -595,7 +530,7 @@ priorityFactorByQueue := make(map[string]float64) for _, jobs := range tc.Gangs { for _, job := range jobs { - priorityFactorByQueue[job.GetQueue()] = 1 + priorityFactorByQueue[job.Queue()] = 1 } } @@ -677,16 +612,6 @@ } } - // Verify any excess jobs that failed have the correct state set - for _, jctx := range jctxs { - if jctx.ShouldFail { - if jctx.PodSchedulingContext != nil { - require.Equal(t, "", jctx.PodSchedulingContext.NodeId) - } - require.Equal(t, "job does not fit on any node", jctx.UnschedulableReason) - } - } - // Verify accounting scheduledGangs++ require.Equal(t, scheduledGangs, sch.schedulingContext.NumScheduledGangs) diff --git a/internal/scheduler/interfaces/interfaces.go b/internal/scheduler/interfaces/interfaces.go index c24d43300c9..d5ef8a3c84c 100644 --- a/internal/scheduler/interfaces/interfaces.go +++ b/internal/scheduler/interfaces/interfaces.go @@ -1,64 +1,6 @@ package interfaces -import ( - "time" - - v1 "k8s.io/api/core/v1" - - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" -) - type MinimalJob interface { - GetAnnotations() map[string]string - GetPriorityClassName() string -} - -// LegacySchedulerJob is the job interface used throughout the scheduler. 
-type LegacySchedulerJob interface { - GetId() string - GetQueue() string - GetJobSet() string - GetPerQueuePriority() uint32 - GetSubmitTime() time.Time - GetAnnotations() map[string]string - GetPodRequirements(priorityClasses map[string]types.PriorityClass) *schedulerobjects.PodRequirements - GetPriorityClassName() string - GetScheduledAtPriority() (int32, bool) - GetNodeSelector() map[string]string - GetAffinity() *v1.Affinity - GetTolerations() []v1.Toleration - GetResourceRequirements() v1.ResourceRequirements - GetQueueTtlSeconds() int64 - // GetSchedulingKey returns (schedulingKey, true) if the job has a scheduling key associated with it and - // (emptySchedulingKey, false) otherwise, where emptySchedulingKey is the zero value of the SchedulingKey type. - GetSchedulingKey() (schedulerobjects.SchedulingKey, bool) - // SchedulingOrderCompare defines the order in which jobs in a queue should be scheduled - // (both when scheduling new jobs and when re-scheduling evicted jobs). - // Specifically, compare returns - // - 0 if the jobs have equal job id, - // - -1 if job should be scheduled before other, - // - +1 if other should be scheduled before other. - SchedulingOrderCompare(other LegacySchedulerJob) int -} - -func PriorityClassFromLegacySchedulerJob(priorityClasses map[string]types.PriorityClass, defaultPriorityClassName string, job LegacySchedulerJob) types.PriorityClass { - priorityClassName := job.GetPriorityClassName() - if priorityClass, ok := priorityClasses[priorityClassName]; ok { - return priorityClass - } - // We could return (types.PriorityClass{}, false) here, but then callers - // might handle this situation in different ways; return the default - // priority class in order to enforce uniformity. - return priorityClasses[defaultPriorityClassName] -} - -func SchedulingKeyFromLegacySchedulerJob(skg *schedulerobjects.SchedulingKeyGenerator, job LegacySchedulerJob) schedulerobjects.SchedulingKey { - return skg.Key( - job.GetNodeSelector(), - job.GetAffinity(), - job.GetTolerations(), - job.GetResourceRequirements().Requests, - job.GetPriorityClassName(), - ) + Annotations() map[string]string + PriorityClassName() string } diff --git a/internal/scheduler/internaltypes/node.go b/internal/scheduler/internaltypes/node.go index b7c516db39b..8d7e61fe626 100644 --- a/internal/scheduler/internaltypes/node.go +++ b/internal/scheduler/internaltypes/node.go @@ -4,10 +4,8 @@ import ( "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" - armadamaps "github.com/armadaproject/armada/internal/common/maps" "github.com/armadaproject/armada/internal/scheduler/kubernetesobjects/label" koTaint "github.com/armadaproject/armada/internal/scheduler/kubernetesobjects/taint" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) type Node struct { @@ -27,14 +25,14 @@ type Node struct { taints []v1.Taint labels map[string]string - TotalResources schedulerobjects.ResourceList + TotalResources ResourceList // This field is set when inserting the Node into a NodeDb. 
Keys [][]byte - AllocatableByPriority schedulerobjects.AllocatableByPriorityAndResourceType - AllocatedByQueue map[string]schedulerobjects.ResourceList - AllocatedByJobId map[string]schedulerobjects.ResourceList + AllocatableByPriority map[int32]ResourceList + AllocatedByQueue map[string]ResourceList + AllocatedByJobId map[string]ResourceList EvictedJobRunIds map[string]bool } @@ -46,10 +44,10 @@ func CreateNode( name string, taints []v1.Taint, labels map[string]string, - totalResources schedulerobjects.ResourceList, - allocatableByPriority schedulerobjects.AllocatableByPriorityAndResourceType, - allocatedByQueue map[string]schedulerobjects.ResourceList, - allocatedByJobId map[string]schedulerobjects.ResourceList, + totalResources ResourceList, + allocatableByPriority map[int32]ResourceList, + allocatedByQueue map[string]ResourceList, + allocatedByJobId map[string]ResourceList, evictedJobRunIds map[string]bool, keys [][]byte, ) *Node { @@ -62,9 +60,9 @@ func CreateNode( taints: koTaint.DeepCopyTaints(taints), labels: deepCopyLabels(labels), TotalResources: totalResources, - AllocatableByPriority: allocatableByPriority, - AllocatedByQueue: allocatedByQueue, - AllocatedByJobId: allocatedByJobId, + AllocatableByPriority: maps.Clone(allocatableByPriority), + AllocatedByQueue: maps.Clone(allocatedByQueue), + AllocatedByJobId: maps.Clone(allocatedByJobId), EvictedJobRunIds: evictedJobRunIds, Keys: keys, } @@ -135,9 +133,9 @@ func (node *Node) UnsafeCopy() *Node { Keys: nil, - AllocatableByPriority: armadamaps.DeepCopy(node.AllocatableByPriority), - AllocatedByQueue: armadamaps.DeepCopy(node.AllocatedByQueue), - AllocatedByJobId: armadamaps.DeepCopy(node.AllocatedByJobId), + AllocatableByPriority: maps.Clone(node.AllocatableByPriority), + AllocatedByQueue: maps.Clone(node.AllocatedByQueue), + AllocatedByJobId: maps.Clone(node.AllocatedByJobId), EvictedJobRunIds: maps.Clone(node.EvictedJobRunIds), } } diff --git a/internal/scheduler/internaltypes/node_test.go b/internal/scheduler/internaltypes/node_test.go index f58eedfb437..5425069232c 100644 --- a/internal/scheduler/internaltypes/node_test.go +++ b/internal/scheduler/internaltypes/node_test.go @@ -7,10 +7,16 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + schedulerconfiguration "github.com/armadaproject/armada/internal/scheduler/configuration" ) func TestNode(t *testing.T) { + resourceListFactory, err := MakeResourceListFactory([]schedulerconfiguration.ResourceType{ + {Name: "memory", Resolution: resource.MustParse("1")}, + {Name: "cpu", Resolution: resource.MustParse("1m")}, + }) + assert.Nil(t, err) + const id = "id" const nodeTypeId = uint64(123) const index = uint64(1) @@ -25,47 +31,47 @@ func TestNode(t *testing.T) { labels := map[string]string{ "key": "value", } - totalResources := schedulerobjects.ResourceList{ - Resources: map[string]resource.Quantity{ + totalResources := resourceListFactory.FromNodeProto( + map[string]resource.Quantity{ "cpu": resource.MustParse("16"), "memory": resource.MustParse("32Gi"), }, - } - allocatableByPriority := schedulerobjects.AllocatableByPriorityAndResourceType{ - 1: { - Resources: map[string]resource.Quantity{ + ) + allocatableByPriority := map[int32]ResourceList{ + 1: resourceListFactory.FromNodeProto( + map[string]resource.Quantity{ "cpu": resource.MustParse("0"), "memory": resource.MustParse("0Gi"), }, - }, - 2: { - Resources: map[string]resource.Quantity{ + ), + 2: resourceListFactory.FromNodeProto( + 
map[string]resource.Quantity{ "cpu": resource.MustParse("8"), "memory": resource.MustParse("16Gi"), }, - }, - 3: { - Resources: map[string]resource.Quantity{ + ), + 3: resourceListFactory.FromNodeProto( + map[string]resource.Quantity{ "cpu": resource.MustParse("16"), "memory": resource.MustParse("32Gi"), }, - }, + ), } - allocatedByQueue := map[string]schedulerobjects.ResourceList{ - "queue": { - Resources: map[string]resource.Quantity{ + allocatedByQueue := map[string]ResourceList{ + "queue": resourceListFactory.FromJobResourceListIgnoreUnknown( + map[string]resource.Quantity{ "cpu": resource.MustParse("8"), "memory": resource.MustParse("16Gi"), }, - }, + ), } - allocatedByJobId := map[string]schedulerobjects.ResourceList{ - "jobId": { - Resources: map[string]resource.Quantity{ + allocatedByJobId := map[string]ResourceList{ + "jobId": resourceListFactory.FromJobResourceListIgnoreUnknown( + map[string]resource.Quantity{ "cpu": resource.MustParse("8"), "memory": resource.MustParse("16Gi"), }, - }, + ), } evictedJobRunIds := map[string]bool{ "jobId": false, diff --git a/internal/scheduler/internaltypes/quantity_util.go b/internal/scheduler/internaltypes/quantity_util.go new file mode 100644 index 00000000000..1857ed2cce1 --- /dev/null +++ b/internal/scheduler/internaltypes/quantity_util.go @@ -0,0 +1,18 @@ +package internaltypes + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +func QuantityToInt64RoundUp(q resource.Quantity, scale resource.Scale) int64 { + return q.ScaledValue(scale) +} + +func QuantityToInt64RoundDown(q resource.Quantity, scale resource.Scale) int64 { + result := q.ScaledValue(scale) + q2 := resource.NewScaledQuantity(result, scale) + if q2.Cmp(q) > 0 { + result-- + } + return result +} diff --git a/internal/scheduler/internaltypes/quantity_util_test.go b/internal/scheduler/internaltypes/quantity_util_test.go new file mode 100644 index 00000000000..7300906bca6 --- /dev/null +++ b/internal/scheduler/internaltypes/quantity_util_test.go @@ -0,0 +1,69 @@ +package internaltypes + +import ( + "testing" + + "gopkg.in/inf.v0" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/resource" +) + +type quantityTest struct { + q resource.Quantity + expectedIntRoundDown int64 + expectedIntRoundUp int64 +} + +func TestQuantityToInt64_WithScaleMillis(t *testing.T) { + tests := []quantityTest{ + {resource.MustParse("0"), 0, 0}, + {resource.MustParse("1"), 1000, 1000}, + {resource.MustParse("1m"), 1, 1}, + {resource.MustParse("50m"), 50, 50}, + {resource.MustParse("1Mi"), 1024 * 1024 * 1000, 1024 * 1024 * 1000}, + {resource.MustParse("1e3"), 1000 * 1000, 1000 * 1000}, + {*resource.NewMilliQuantity(1, resource.DecimalExponent), 1, 1}, + {*resource.NewDecimalQuantity(*inf.NewDec(1, inf.Scale(0)), resource.DecimalExponent), 1000, 1000}, + {resource.MustParse("1n"), 0, 1}, + {resource.MustParse("100n"), 0, 1}, + {resource.MustParse("999999999n"), 999, 1000}, + {resource.MustParse("1000000000n"), 1000, 1000}, + {resource.MustParse("1000000001n"), 1000, 1001}, + {resource.MustParse("12.3m"), 12, 13}, + {resource.MustParse("0.99m"), 0, 1}, + {resource.MustParse("1.001m"), 1, 2}, + } + + for _, test := range tests { + assert.Equal(t, test.expectedIntRoundDown, QuantityToInt64RoundDown(test.q, resource.Milli), test.q) + assert.Equal(t, test.expectedIntRoundUp, QuantityToInt64RoundUp(test.q, resource.Milli), test.q) + } +} + +func TestQuantityToInt64_WithUnitScale(t *testing.T) { + const tebi = 1024 * 1024 * 1024 * 1024 + tests := []quantityTest{ + 
{resource.MustParse("0"), 0, 0}, + {resource.MustParse("1"), 1, 1}, + {resource.MustParse("1m"), 0, 1}, + {resource.MustParse("50m"), 0, 1}, + {resource.MustParse("1Mi"), 1024 * 1024, 1024 * 1024}, + {resource.MustParse("1.5Mi"), 1536 * 1024, 1536 * 1024}, + {resource.MustParse("4Ti"), 4 * tebi, 4 * tebi}, + {resource.MustParse("100000Ti"), 100000 * tebi, 100000 * tebi}, + {resource.MustParse("1e3"), 1000, 1000}, + {*resource.NewMilliQuantity(1, resource.DecimalExponent), 0, 1}, + {*resource.NewDecimalQuantity(*inf.NewDec(1, inf.Scale(0)), resource.DecimalExponent), 1, 1}, + {resource.MustParse("1n"), 0, 1}, + {resource.MustParse("100n"), 0, 1}, + {resource.MustParse("999999999n"), 0, 1}, + {resource.MustParse("1000000000n"), 1, 1}, + {resource.MustParse("1000000001n"), 1, 2}, + } + + for _, test := range tests { + assert.Equal(t, test.expectedIntRoundDown, QuantityToInt64RoundDown(test.q, resource.Scale(0)), test.q) + assert.Equal(t, test.expectedIntRoundUp, QuantityToInt64RoundUp(test.q, resource.Scale(0)), test.q) + } +} diff --git a/internal/scheduler/internaltypes/resource_list.go b/internal/scheduler/internaltypes/resource_list.go new file mode 100644 index 00000000000..7d1556c4166 --- /dev/null +++ b/internal/scheduler/internaltypes/resource_list.go @@ -0,0 +1,205 @@ +package internaltypes + +import ( + "fmt" + + "golang.org/x/exp/slices" + k8sResource "k8s.io/apimachinery/pkg/api/resource" +) + +type ResourceList struct { + resources []int64 // immutable, do not change this, return a new struct instead! + factory *ResourceListFactory // immutable, do not change this! +} + +type Resource struct { + Name string + Value int64 + Scale k8sResource.Scale +} + +func (rl ResourceList) Equal(other ResourceList) bool { + if rl.IsEmpty() && other.IsEmpty() { + return true + } + if rl.IsEmpty() || other.IsEmpty() { + return false + } + return slices.Equal(rl.resources, other.resources) +} + +func (rl ResourceList) String() string { + if rl.IsEmpty() { + return "empty" + } + result := "" + for i, name := range rl.factory.indexToName { + if i > 0 { + result += " " + } + result += fmt.Sprintf("%s=%s", name, rl.asQuantity(i).String()) + } + return result +} + +func (rl ResourceList) GetByName(name string) (int64, error) { + if rl.IsEmpty() { + return 0, fmt.Errorf("resource type %s not found as resource list is empty", name) + } + index, ok := rl.factory.nameToIndex[name] + if !ok { + return 0, fmt.Errorf("resource type %s not found", name) + } + return rl.resources[index], nil +} + +func (rl ResourceList) GetByNameZeroIfMissing(name string) int64 { + if rl.IsEmpty() { + return 0 + } + index, ok := rl.factory.nameToIndex[name] + if !ok { + return 0 + } + return rl.resources[index] +} + +func (rl ResourceList) GetResources() []Resource { + if rl.IsEmpty() { + return []Resource{} + } + + result := make([]Resource, len(rl.resources)) + for i, q := range rl.resources { + result[i] = Resource{ + Name: rl.factory.indexToName[i], + Value: q, + Scale: rl.factory.scales[i], + } + } + return result +} + +func (rl ResourceList) AllZero() bool { + if rl.IsEmpty() { + return true + } + for _, r := range rl.resources { + if r != 0 { + return false + } + } + return true +} + +func (rl ResourceList) HasNegativeValues() bool { + if rl.IsEmpty() { + return false + } + for _, r := range rl.resources { + if r < 0 { + return true + } + } + return false +} + +func (rl ResourceList) IsEmpty() bool { + return rl.factory == nil +} + +// ExceedsAvailable +// - if any resource in this ResourceList is greater than the 
equivalent resource in param available, this function returns +// - the name of the relevant resource +// - the amount of the relevant resource in available +// - the amount of the relevant resource in this ResourceList +// - true +// +// - if no resources in this ResourceList exceed available, the last return value is false. +// - empty resource lists are considered equivalent to all zero. +func (rl ResourceList) ExceedsAvailable(available ResourceList) (string, k8sResource.Quantity, k8sResource.Quantity, bool) { + if rl.IsEmpty() && available.IsEmpty() { + return "", k8sResource.Quantity{}, k8sResource.Quantity{}, false + } + + if available.factory != nil && rl.factory != nil && rl.factory != available.factory { + panic("mismatched ResourceListFactory") + } + + var factory *ResourceListFactory + if available.IsEmpty() { + factory = rl.factory + } else { + factory = available.factory + } + + availableResources := resourcesZeroIfEmpty(available.resources, factory) + requiredResources := resourcesZeroIfEmpty(rl.resources, factory) + + for i, requiredQuantity := range requiredResources { + availableQuantity := availableResources[i] + if requiredQuantity > availableQuantity { + return factory.indexToName[i], *available.asQuantity(i), *rl.asQuantity(i), true + } + } + return "", k8sResource.Quantity{}, k8sResource.Quantity{}, false +} + +func (rl ResourceList) Add(other ResourceList) ResourceList { + if rl.IsEmpty() { + return other + } + if other.IsEmpty() { + return rl + } + if rl.factory != other.factory { + panic("mismatched ResourceListFactory") + } + result := make([]int64, len(rl.resources)) + for i, r := range rl.resources { + result[i] = r + other.resources[i] + } + return ResourceList{factory: rl.factory, resources: result} +} + +func (rl ResourceList) Subtract(other ResourceList) ResourceList { + if other.IsEmpty() { + return rl + } + if rl.IsEmpty() { + return other.Negate() + } + if rl.factory != other.factory { + panic("mismatched ResourceListFactory") + } + result := make([]int64, len(rl.resources)) + for i, r := range rl.resources { + result[i] = r - other.resources[i] + } + return ResourceList{factory: rl.factory, resources: result} +} + +func (rl ResourceList) Negate() ResourceList { + if rl.IsEmpty() { + return rl + } + result := make([]int64, len(rl.resources)) + for i, r := range rl.resources { + result[i] = -r + } + return ResourceList{factory: rl.factory, resources: result} +} + +func (rl ResourceList) asQuantity(index int) *k8sResource.Quantity { + if rl.factory == nil { + return &k8sResource.Quantity{} + } + return k8sResource.NewScaledQuantity(rl.resources[index], rl.factory.scales[index]) +} + +func resourcesZeroIfEmpty(resources []int64, factory *ResourceListFactory) []int64 { + if resources == nil { + return make([]int64, len(factory.indexToName)) + } + return resources +} diff --git a/internal/scheduler/internaltypes/resource_list_factory.go b/internal/scheduler/internaltypes/resource_list_factory.go new file mode 100644 index 00000000000..c610b8040dc --- /dev/null +++ b/internal/scheduler/internaltypes/resource_list_factory.go @@ -0,0 +1,121 @@ +package internaltypes + +import ( + "fmt" + "math" + + "github.com/pkg/errors" + + v1 "k8s.io/api/core/v1" + k8sResource "k8s.io/apimachinery/pkg/api/resource" + + "github.com/armadaproject/armada/internal/scheduler/configuration" +) + +type ResourceListFactory struct { + nameToIndex map[string]int + indexToName []string + scales []k8sResource.Scale +} + +func MakeResourceListFactory(supportedResourceTypes 
[]configuration.ResourceType) (*ResourceListFactory, error) {
+	if len(supportedResourceTypes) == 0 {
+		return nil, errors.New("no resource types configured")
+	}
+	indexToName := make([]string, len(supportedResourceTypes))
+	nameToIndex := make(map[string]int, len(supportedResourceTypes))
+	scales := make([]k8sResource.Scale, len(supportedResourceTypes))
+	for i, t := range supportedResourceTypes {
+		if _, exists := nameToIndex[t.Name]; exists {
+			return nil, fmt.Errorf("duplicate resource type name %q", t.Name)
+		}
+		nameToIndex[t.Name] = i
+		indexToName[i] = t.Name
+		scales[i] = resolutionToScale(t.Resolution)
+	}
+	return &ResourceListFactory{
+		indexToName: indexToName,
+		nameToIndex: nameToIndex,
+		scales:      scales,
+	}, nil
+}
+
+// Convert resolution to a k8sResource.Scale
+// e.g.
+// 1 -> 0
+// 0.001 -> -3
+// 1000 -> 3 (zero or negative resolutions default to milli, i.e. -3)
+func resolutionToScale(resolution k8sResource.Quantity) k8sResource.Scale {
+	if resolution.Sign() < 1 {
+		return k8sResource.Milli
+	}
+	return k8sResource.Scale(math.Floor(math.Log10(resolution.AsApproximateFloat64())))
+}
+
+func (factory *ResourceListFactory) MakeAllZero() ResourceList {
+	result := make([]int64, len(factory.indexToName))
+	return ResourceList{resources: result, factory: factory}
+}
+
+// Ignore unknown resources, round down.
+func (factory *ResourceListFactory) FromNodeProto(resources map[string]k8sResource.Quantity) ResourceList {
+	result := make([]int64, len(factory.indexToName))
+	for k, v := range resources {
+		index, ok := factory.nameToIndex[k]
+		if ok {
+			result[index] = QuantityToInt64RoundDown(v, factory.scales[index])
+		}
+	}
+	return ResourceList{resources: result, factory: factory}
+}
+
+// Ignore unknown resources, round up.
+func (factory *ResourceListFactory) FromJobResourceListIgnoreUnknown(resources map[string]k8sResource.Quantity) ResourceList {
+	result := make([]int64, len(factory.indexToName))
+	for k, v := range resources {
+		index, ok := factory.nameToIndex[k]
+		if ok {
+			result[index] = QuantityToInt64RoundUp(v, factory.scales[index])
+		}
+	}
+	return ResourceList{resources: result, factory: factory}
+}
+
+// Fail on unknown resources, round up.
+func (factory *ResourceListFactory) FromJobResourceListFailOnUnknown(resources v1.ResourceList) (ResourceList, error) {
+	if resources == nil {
+		return ResourceList{}, nil
+	}
+	result := make([]int64, len(factory.indexToName))
+	for k, v := range resources {
+		index, ok := factory.nameToIndex[string(k)]
+		if ok {
+			result[index] = QuantityToInt64RoundUp(v, factory.scales[index])
+		} else {
+			return ResourceList{}, fmt.Errorf("resource type %q is not supported (if you want to use it, add it to scheduling.supportedResourceTypes in the Armada scheduler config)", string(k))
+		}
+	}
+	return ResourceList{resources: result, factory: factory}, nil
+}
+
+func (factory *ResourceListFactory) SummaryString() string {
+	result := ""
+	for i, name := range factory.indexToName {
+		if i > 0 {
+			result += " "
+		}
+		scale := factory.scales[i]
+		resolution := k8sResource.NewScaledQuantity(1, scale)
+		maxValue := k8sResource.NewScaledQuantity(math.MaxInt64, scale)
+		result += fmt.Sprintf("%s (scale %v, resolution %v, maxValue %f)", name, scale, resolution, maxValue.AsApproximateFloat64())
+	}
+	return result
+}
+
+func (factory *ResourceListFactory) GetScale(resourceTypeName string) (k8sResource.Scale, error) {
+	index, ok := factory.nameToIndex[resourceTypeName]
+	if !ok {
+		return 0, fmt.Errorf("unknown resource type %q", resourceTypeName)
+	}
+	return factory.scales[index], nil
+}
diff --git a/internal/scheduler/internaltypes/resource_list_factory_test.go b/internal/scheduler/internaltypes/resource_list_factory_test.go
new file mode 100644
index 00000000000..5224a553e7b
--- /dev/null
+++ b/internal/scheduler/internaltypes/resource_list_factory_test.go
@@ -0,0 +1,125 @@
+package internaltypes
+
+import (
+	"math"
+	"testing"
+
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/stretchr/testify/assert"
+	k8sResource "k8s.io/apimachinery/pkg/api/resource"
+
+	"github.com/armadaproject/armada/internal/scheduler/configuration"
+)
+
+func TestMakeResourceListFactory(t *testing.T) {
+	factory := testFactory()
+
+	assert.Equal(t, []string{"memory", "ephemeral-storage", "cpu", "nvidia.com/gpu"}, factory.indexToName)
+	assert.Equal(t, map[string]int{"memory": 0, "ephemeral-storage": 1, "cpu": 2, "nvidia.com/gpu": 3}, factory.nameToIndex)
+	assert.Equal(t, []k8sResource.Scale{0, 0, k8sResource.Milli, k8sResource.Milli}, factory.scales)
+}
+
+func TestResolutionToScale(t *testing.T) {
+	assert.Equal(t, k8sResource.Scale(0), resolutionToScale(k8sResource.MustParse("1")))
+	assert.Equal(t, k8sResource.Scale(-3), resolutionToScale(k8sResource.MustParse("0.001")))
+	assert.Equal(t, k8sResource.Scale(-3), resolutionToScale(k8sResource.MustParse("0.0011")))
+	assert.Equal(t, k8sResource.Scale(-4), resolutionToScale(k8sResource.MustParse("0.00099")))
+	assert.Equal(t, k8sResource.Scale(3), resolutionToScale(k8sResource.MustParse("1000")))
+}
+
+func TestResolutionToScaleDefaultsCorrectly(t *testing.T) {
+	defaultValue := k8sResource.Scale(-3)
+	assert.Equal(t, defaultValue, resolutionToScale(k8sResource.MustParse("0")))
+	assert.Equal(t, defaultValue, resolutionToScale(k8sResource.MustParse("-1")))
+}
+
+func TestFromNodeProto(t *testing.T) {
+	factory := testFactory()
+	result := factory.FromNodeProto(map[string]k8sResource.Quantity{
+		"memory":  k8sResource.MustParse("100Mi"),
+		"cpu":     k8sResource.MustParse("9999999n"),
+		"missing": k8sResource.MustParse("200Mi"), // should ignore missing
+	})
+	assert.Equal(t, int64(100*1024*1024), testGet(&result, "memory"))
+	assert.Equal(t, int64(9), testGet(&result,
"cpu")) + assert.Equal(t, int64(0), testGet(&result, "nvidia.com/gpu")) +} + +func TestFromJobResourceListFailOnUnknown(t *testing.T) { + factory := testFactory() + result, err := factory.FromJobResourceListFailOnUnknown(map[v1.ResourceName]k8sResource.Quantity{ + "memory": k8sResource.MustParse("100Mi"), + "cpu": k8sResource.MustParse("9999999n"), + }) + assert.Nil(t, err) + assert.Equal(t, int64(100*1024*1024), testGet(&result, "memory")) + assert.Equal(t, int64(10), testGet(&result, "cpu")) + assert.Equal(t, int64(0), testGet(&result, "nvidia.com/gpu")) +} + +func TestFromJobResourceListFailOnUnknownErrorsIfMissing(t *testing.T) { + factory := testFactory() + _, err := factory.FromJobResourceListFailOnUnknown(map[v1.ResourceName]k8sResource.Quantity{ + "memory": k8sResource.MustParse("100Mi"), + "missing": k8sResource.MustParse("1"), + }) + assert.NotNil(t, err) +} + +func TestFromJobResourceListIgnoreUnknown(t *testing.T) { + factory := testFactory() + result := factory.FromJobResourceListIgnoreUnknown(map[string]k8sResource.Quantity{ + "memory": k8sResource.MustParse("100Mi"), + "cpu": k8sResource.MustParse("9999999n"), + }) + assert.Equal(t, int64(100*1024*1024), testGet(&result, "memory")) + assert.Equal(t, int64(10), testGet(&result, "cpu")) + assert.Equal(t, int64(0), testGet(&result, "nvidia.com/gpu")) +} + +func TestFromJobResourceListIgnoreUnknownDoesNotErrorIfMissing(t *testing.T) { + factory := testFactory() + result := factory.FromJobResourceListIgnoreUnknown(map[string]k8sResource.Quantity{ + "memory": k8sResource.MustParse("100Mi"), + "missing": k8sResource.MustParse("1"), + }) + assert.Equal(t, int64(100*1024*1024), testGet(&result, "memory")) +} + +func TestGetScale(t *testing.T) { + factory := testFactory() + + scale, err := factory.GetScale("memory") + assert.Nil(t, err) + assert.Equal(t, k8sResource.Scale(0), scale) + + scale, err = factory.GetScale("cpu") + assert.Nil(t, err) + assert.Equal(t, k8sResource.Milli, scale) +} + +func TestGetScaleFailsOnUnknown(t *testing.T) { + factory := testFactory() + + _, err := factory.GetScale("missing") + assert.NotNil(t, err) +} + +func testFactory() *ResourceListFactory { + factory, _ := MakeResourceListFactory([]configuration.ResourceType{ + {Name: "memory", Resolution: k8sResource.MustParse("1")}, + {Name: "ephemeral-storage", Resolution: k8sResource.MustParse("1")}, + {Name: "cpu", Resolution: k8sResource.MustParse("1m")}, + {Name: "nvidia.com/gpu", Resolution: k8sResource.MustParse("1m")}, + }) + return factory +} + +func testGet(rl *ResourceList, name string) int64 { + val, err := rl.GetByName(name) + if err != nil { + return math.MinInt64 + } + return val +} diff --git a/internal/scheduler/internaltypes/resource_list_test.go b/internal/scheduler/internaltypes/resource_list_test.go new file mode 100644 index 00000000000..b48b4aab57c --- /dev/null +++ b/internal/scheduler/internaltypes/resource_list_test.go @@ -0,0 +1,214 @@ +package internaltypes + +import ( + "testing" + + "github.com/stretchr/testify/assert" + k8sResource "k8s.io/apimachinery/pkg/api/resource" +) + +func TestEqual(t *testing.T) { + factory := testFactory() + + a := testResourceList(factory, "1", "1Gi") + b := testResourceList(factory, "1", "1Gi") + c := testResourceList(factory, "1", "2Gi") + assert.True(t, a.Equal(b)) + assert.True(t, a.Equal(a)) + assert.False(t, a.Equal(c)) + assert.False(t, c.Equal(a)) +} + +func TestEqual_HandlesEmptyCorrectly(t *testing.T) { + factory := testFactory() + + a := testResourceList(factory, "1", "1Gi") + e1 := 
ResourceList{} + e2 := ResourceList{} + + assert.True(t, e1.Equal(e2)) + assert.True(t, e1.Equal(e1)) + + assert.False(t, a.Equal(e1)) + assert.False(t, e1.Equal(a)) +} + +func TestGetByName(t *testing.T) { + factory := testFactory() + a := testResourceList(factory, "1", "1Gi") + + cpu, err := a.GetByName("cpu") + assert.Nil(t, err) + assert.Equal(t, int64(1000), cpu) + + _, err = a.GetByName("missing") + assert.NotNil(t, err) +} + +func TestGetByName_HandlesEmptyCorrectly(t *testing.T) { + empty := ResourceList{} + _, err := empty.GetByName("cpu") + assert.NotNil(t, err) +} + +func TestGetByNameZeroIfMissing(t *testing.T) { + factory := testFactory() + a := testResourceList(factory, "1", "1Gi") + + assert.Equal(t, int64(1000), a.GetByNameZeroIfMissing("cpu")) + assert.Equal(t, int64(0), a.GetByNameZeroIfMissing("missing")) +} + +func TestGetByNameZeroIfMissing_HandlesEmptyCorrectly(t *testing.T) { + empty := ResourceList{} + assert.Equal(t, int64(0), empty.GetByNameZeroIfMissing("missing")) +} + +func TestGetResources(t *testing.T) { + factory := testFactory() + a := testResourceList(factory, "1", "1Gi") + + expected := []Resource{ + {Name: "memory", Value: 1024 * 1024 * 1024, Scale: k8sResource.Scale(0)}, + {Name: "ephemeral-storage", Value: 0, Scale: k8sResource.Scale(0)}, + {Name: "cpu", Value: 1000, Scale: k8sResource.Milli}, + {Name: "nvidia.com/gpu", Value: 0, Scale: k8sResource.Milli}, + } + assert.Equal(t, expected, a.GetResources()) +} + +func TestGetResources_HandlesEmptyCorrectly(t *testing.T) { + empty := ResourceList{} + assert.Equal(t, 0, len(empty.GetResources())) +} + +func TestAllZero(t *testing.T) { + factory := testFactory() + assert.True(t, testResourceList(factory, "0", "0").AllZero()) + assert.False(t, testResourceList(factory, "1", "0").AllZero()) + assert.False(t, testResourceList(factory, "1", "1").AllZero()) + assert.False(t, testResourceList(factory, "0", "-1").AllZero()) +} + +func TestAllZero_HandlesEmptyCorrectly(t *testing.T) { + empty := ResourceList{} + assert.True(t, empty.AllZero()) +} + +func TestHasNegativeValues(t *testing.T) { + factory := testFactory() + assert.False(t, testResourceList(factory, "0", "0").HasNegativeValues()) + assert.False(t, testResourceList(factory, "1", "0").HasNegativeValues()) + assert.True(t, testResourceList(factory, "-1", "1").HasNegativeValues()) + assert.True(t, testResourceList(factory, "-1", "-1").HasNegativeValues()) +} + +func TestHasNegativeValues_HandlesEmptyCorrectly(t *testing.T) { + empty := ResourceList{} + assert.False(t, empty.HasNegativeValues()) +} + +func TestIsEmpty(t *testing.T) { + factory := testFactory() + + nonZero := testResourceList(factory, "1", "1") + zero := testResourceList(factory, "0", "0") + empty := ResourceList{} + + assert.False(t, nonZero.IsEmpty()) + assert.False(t, zero.IsEmpty()) + assert.True(t, empty.IsEmpty()) +} + +func TestExceedsAvailable(t *testing.T) { + factory := testFactory() + + message, _, _, exceeds := testResourceList(factory, "1", "1").ExceedsAvailable(testResourceList(factory, "2", "1")) + assert.Equal(t, "", message) + assert.False(t, exceeds) + + message, availableReturned, requiredReturned, exceeds := testResourceList(factory, "10", "1").ExceedsAvailable(testResourceList(factory, "5", "2")) + assert.Equal(t, "cpu", message) + assert.True(t, exceeds) + assert.Equal(t, k8sResource.NewScaledQuantity(10000, k8sResource.Milli), &requiredReturned) + assert.Equal(t, k8sResource.NewScaledQuantity(5000, k8sResource.Milli), &availableReturned) +} + +func 
TestExceedsAvailable_HandlesEmptyCorrectly(t *testing.T) { + factory := testFactory() + empty := ResourceList{} + notEmpty := testResourceList(factory, "1", "1Ki") + notEmptyNegative := testResourceList(factory, "-1", "-1Ki") + + message, _, _, exceeds := empty.ExceedsAvailable(empty) + assert.Equal(t, "", message) + assert.False(t, exceeds) + + message, _, _, exceeds = empty.ExceedsAvailable(notEmpty) + assert.Equal(t, "", message) + assert.False(t, exceeds) + + message, availableReturned, requiredReturned, exceeds := notEmpty.ExceedsAvailable(empty) + assert.Equal(t, "memory", message) + assert.True(t, exceeds) + assert.Equal(t, k8sResource.NewScaledQuantity(1024, 0), &requiredReturned) + assert.Equal(t, k8sResource.Quantity{}, availableReturned) + + message, _, _, exceeds = notEmptyNegative.ExceedsAvailable(empty) + assert.Equal(t, "", message) + assert.False(t, exceeds) + + message, availableReturned, requiredReturned, exceeds = empty.ExceedsAvailable(notEmptyNegative) + assert.Equal(t, "memory", message) + assert.True(t, exceeds) + assert.Equal(t, k8sResource.NewScaledQuantity(-1024, 0), &availableReturned) + assert.Equal(t, k8sResource.Quantity{}, requiredReturned) +} + +func TestAdd(t *testing.T) { + factory := testFactory() + + assert.Equal(t, testResourceList(factory, "3", "6Ki"), testResourceList(factory, "1", "2Ki").Add(testResourceList(factory, "2", "4Ki"))) + assert.Equal(t, testResourceList(factory, "-1", "-2Ki"), testResourceList(factory, "1", "2Ki").Add(testResourceList(factory, "-2", "-4Ki"))) +} + +func TestAdd_HandlesEmptyCorrectly(t *testing.T) { + factory := testFactory() + + assert.Equal(t, testResourceList(factory, "1", "1Ki"), testResourceList(factory, "1", "1Ki").Add(ResourceList{})) + assert.Equal(t, testResourceList(factory, "1", "1Ki"), ResourceList{}.Add(testResourceList(factory, "1", "1Ki"))) + assert.Equal(t, ResourceList{}, ResourceList{}.Add(ResourceList{})) +} + +func TestSubtract(t *testing.T) { + factory := testFactory() + + assert.Equal(t, testResourceList(factory, "1", "1Ki"), testResourceList(factory, "2", "2Ki").Subtract(testResourceList(factory, "1", "1Ki"))) + assert.Equal(t, testResourceList(factory, "-1", "-1Ki"), testResourceList(factory, "1", "1Ki").Subtract(testResourceList(factory, "2", "2Ki"))) +} + +func TestSubtract_HandlesEmptyCorrectly(t *testing.T) { + factory := testFactory() + + assert.Equal(t, testResourceList(factory, "1", "1Ki"), testResourceList(factory, "1", "1Ki").Subtract(ResourceList{})) + assert.Equal(t, testResourceList(factory, "-1", "-1Ki"), ResourceList{}.Subtract(testResourceList(factory, "1", "1Ki"))) + assert.Equal(t, ResourceList{}, ResourceList{}.Subtract(ResourceList{})) +} + +func TestNegate(t *testing.T) { + factory := testFactory() + + assert.Equal(t, testResourceList(factory, "-1", "-1Ki"), testResourceList(factory, "1", "1Ki").Negate()) + assert.Equal(t, testResourceList(factory, "1", "1Ki"), testResourceList(factory, "-1", "-1Ki").Negate()) +} + +func TestNegate_HandlesEmptyCorrectly(t *testing.T) { + assert.Equal(t, ResourceList{}, ResourceList{}.Negate()) +} + +func testResourceList(factory *ResourceListFactory, cpu string, memory string) ResourceList { + return factory.FromJobResourceListIgnoreUnknown(map[string]k8sResource.Quantity{ + "cpu": k8sResource.MustParse(cpu), + "memory": k8sResource.MustParse(memory), + }) +} diff --git a/internal/scheduler/jobdb/comparison.go b/internal/scheduler/jobdb/comparison.go index c7278cf1132..f8876edef3c 100644 --- a/internal/scheduler/jobdb/comparison.go +++ 
b/internal/scheduler/jobdb/comparison.go @@ -1,58 +1,20 @@ package jobdb -import ( - "time" - - "github.com/armadaproject/armada/internal/scheduler/interfaces" -) - type ( JobPriorityComparer struct{} - JobQueueTtlComparer struct{} + JobIdHasher struct{} ) -// Compare jobs by their remaining queue time before expiry -// Invariants: -// - Job.queueTtl must be > 0 -// - Job.created must be < `t` -func (j JobQueueTtlComparer) Compare(a, b *Job) int { - // Jobs with equal id are always considered equal. - // This ensures at most one job with a particular id can exist in the jobDb. - if a.id == b.id { - return 0 - } - - // TODO: Calling time.Now() here doesn't sound right. We should probably sort by earliest expiry time. - timeSeconds := time.Now().UTC().Unix() - aDuration := timeSeconds - (a.submittedTime / 1_000_000_000) - bDuration := timeSeconds - (b.submittedTime / 1_000_000_000) - - aRemaining := max(0, a.GetQueueTtlSeconds()-aDuration) - bRemaining := max(0, b.GetQueueTtlSeconds()-bDuration) - - // If jobs have different ttl remaining, they are ordered by remaining queue ttl - the smallest ttl first. - if aRemaining != bRemaining { - if aRemaining < bRemaining { - return -1 - } else { - return 1 - } - } - - // Tie-break by logical creation timestamp. - if a.id < b.id { - return -1 - } else if a.id > b.id { - return 1 +func (JobIdHasher) Hash(j *Job) uint32 { + var hash uint32 + for _, c := range j.id { + hash = 31*hash + uint32(c) } - panic("We should never get here. Since we check for job id equality at the top of this function.") + return hash } -func max(x, y int64) int64 { - if x < y { - return y - } - return x +func (JobIdHasher) Equal(a, b *Job) bool { + return a == b } func (JobPriorityComparer) Compare(job, other *Job) int { @@ -60,10 +22,10 @@ func (JobPriorityComparer) Compare(job, other *Job) int { } // SchedulingOrderCompare defines the order in which jobs in a particular queue should be scheduled, -func (job *Job) SchedulingOrderCompare(other interfaces.LegacySchedulerJob) int { +func (job *Job) SchedulingOrderCompare(other *Job) int { // We need this cast for now to expose this method via an interface. // This is safe since we only ever compare jobs of the same type. 
- return SchedulingOrderCompare(job, other.(*Job)) + return SchedulingOrderCompare(job, other) } // SchedulingOrderCompare defines the order in which jobs in a queue should be scheduled diff --git a/internal/scheduler/jobdb/comparison_test.go b/internal/scheduler/jobdb/comparison_test.go index 5ebcf816be8..875c139ab39 100644 --- a/internal/scheduler/jobdb/comparison_test.go +++ b/internal/scheduler/jobdb/comparison_test.go @@ -52,7 +52,7 @@ func TestJobPriorityComparer(t *testing.T) { }, "Running jobs come before queued jobs": { a: &Job{id: "a", priority: 1}, - b: (&Job{id: "b", priority: 2, jobDb: NewJobDb(map[string]types.PriorityClass{"foo": {}}, "foo", stringinterner.New(1))}).WithNewRun("", "", "", 0), + b: (&Job{id: "b", priority: 2, jobDb: NewJobDb(map[string]types.PriorityClass{"foo": {}}, "foo", stringinterner.New(1), TestResourceListFactory)}).WithNewRun("", "", "", 0), expected: 1, }, "Running jobs are ordered third by runtime": { diff --git a/internal/scheduler/jobdb/job.go b/internal/scheduler/jobdb/job.go index 90482f9d59a..8f9e8bdb7d9 100644 --- a/internal/scheduler/jobdb/job.go +++ b/internal/scheduler/jobdb/job.go @@ -9,11 +9,12 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" v1 "k8s.io/api/core/v1" armadamaps "github.com/armadaproject/armada/internal/common/maps" "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -39,6 +40,9 @@ type Job struct { submittedTime int64 // Hash of the scheduling requirements of the job. schedulingKey schedulerobjects.SchedulingKey + // True if the job has been validated by the scheduler. + // Any job that fails validation will be rejected. + validated bool // True if the job is currently queued. // If this is set then the job will not be considered for scheduling. queued bool @@ -46,6 +50,8 @@ type Job struct { queuedVersion int32 // Scheduling requirements of this job. jobSchedulingInfo *schedulerobjects.JobSchedulingInfo + // Resource requirements of this job stored in efficient form. + resourceRequirements internaltypes.ResourceList // Priority class of this job. Populated automatically on job creation. priorityClass types.PriorityClass // True if the user has requested this job be cancelled @@ -64,6 +70,8 @@ type Job struct { activeRun *JobRun // The timestamp of the currently active run. activeRunTimestamp int64 + // Pools for which the job is eligible. This is used for metrics reporting and to calculate demand for fair share + pools []string } func (job *Job) String() string { @@ -297,7 +305,7 @@ func (job *Job) Equal(other *Job) bool { return false } if job.schedulingKey != other.schedulingKey { - // Assume jobSchedulingInfo is equal if schedulingKey is equal. + // Assume jobSchedulingInfo/resourceRequirements are equal if schedulingKey is equal. 
return false } if job.queued != other.queued { @@ -315,6 +323,9 @@ func (job *Job) Equal(other *Job) bool { if job.queuedVersion != other.queuedVersion { return false } + if job.validated != other.validated { + return false + } if job.cancelRequested != other.cancelRequested { return false } @@ -336,6 +347,9 @@ func (job *Job) Equal(other *Job) bool { if job.activeRunTimestamp != other.activeRunTimestamp { return false } + if !slices.Equal(job.pools, other.pools) { + return false + } if !armadamaps.DeepEqual(job.runsById, other.runsById) { return false } @@ -347,58 +361,33 @@ func (job *Job) Id() string { return job.id } -// GetId returns the id of the Job. -// This is needed for the LegacyJob interface. -func (job *Job) GetId() string { - return job.id -} - // Jobset returns the jobSet the job belongs to. func (job *Job) Jobset() string { return job.jobSet } -// GetJobSet returns the jobSet the job belongs to. -// This is needed for compatibility with legacyJob -func (job *Job) GetJobSet() string { - return job.jobSet -} - // Queue returns the queue this job belongs to. func (job *Job) Queue() string { return job.queue } -// GetQueue returns the queue this job belongs to. -// This is needed for the LegacyJob interface. -func (job *Job) GetQueue() string { - return job.queue -} - // Priority returns the priority of the job. func (job *Job) Priority() uint32 { return job.priority } -// Priority returns the priority class of the job. -func (job *Job) GetPriorityClass() types.PriorityClass { +// PriorityClass returns the priority class of the job. +func (job *Job) PriorityClass() types.PriorityClass { return job.priorityClass } -// GetSchedulingKey returns the scheduling key associated with a job. -// The second return value is always true since scheduling keys are computed at job creation time. -// This is needed for compatibility with interfaces.LegacySchedulerJob. -func (job *Job) GetSchedulingKey() (schedulerobjects.SchedulingKey, bool) { - return job.schedulingKey, true +// SchedulingKey returns the scheduling key associated with a job. +func (job *Job) SchedulingKey() schedulerobjects.SchedulingKey { + return job.schedulingKey } -// GetPerQueuePriority exists for compatibility with the LegacyJob interface. -func (job *Job) GetPerQueuePriority() uint32 { - return job.priority -} - -// GetSubmitTime exists for compatibility with the LegacyJob interface. -func (job *Job) GetSubmitTime() time.Time { +// SubmitTime exists for compatibility with the LegacyJob interface. +func (job *Job) SubmitTime() time.Time { if job.jobSchedulingInfo == nil { return time.Time{} } @@ -410,6 +399,11 @@ func (job *Job) RequestedPriority() uint32 { return job.requestedPriority } +// Pools returns the pools associated with the job +func (job *Job) Pools() []string { + return slices.Clone(job.pools) +} + // WithPriority returns a copy of the job with the priority updated. func (job *Job) WithPriority(priority uint32) *Job { j := copyJob(*job) @@ -417,6 +411,20 @@ func (job *Job) WithPriority(priority uint32) *Job { return j } +// WithPools returns a copy of the job with the pools updated. +func (job *Job) WithPools(pools []string) *Job { + j := copyJob(*job) + j.pools = slices.Clone(pools) + return j +} + +// WithPriorityClass returns a copy of the job with the priority class updated. +func (job *Job) WithPriorityClass(priorityClass types.PriorityClass) *Job { + j := copyJob(*job) + j.priorityClass = priorityClass + return j +} + // WithSubmittedTime returns a copy of the job with submittedTime updated. 
func (job *Job) WithSubmittedTime(submittedTime int64) *Job { j := copyJob(*job) @@ -436,24 +444,26 @@ func (job *Job) JobSchedulingInfo() *schedulerobjects.JobSchedulingInfo { return job.jobSchedulingInfo } -// GetAnnotations returns the annotations on the job. -// This is needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetAnnotations() map[string]string { +// Annotations returns the annotations on the job. +func (job *Job) Annotations() map[string]string { if req := job.PodRequirements(); req != nil { return req.Annotations } return nil } -// Needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetPriorityClassName() string { +// PriorityClassName returns the name of the job's Priority Class +// TODO: this can be inconsistent with job.PriorityClass() +func (job *Job) PriorityClassName() string { if schedulingInfo := job.JobSchedulingInfo(); schedulingInfo != nil { return schedulingInfo.PriorityClassName } return "" } -func (job *Job) GetScheduledAtPriority() (int32, bool) { +// ScheduledAtPriority returns the numeric priority at which the job was scheduled +// This will return false if the job has not been scheduled yet +func (job *Job) ScheduledAtPriority() (int32, bool) { run := job.LatestRun() if run == nil { return -1, false @@ -465,52 +475,49 @@ func (job *Job) GetScheduledAtPriority() (int32, bool) { return *scheduledAtPriority, true } -// Needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetNodeSelector() map[string]string { +// NodeSelector returns the Node Selector requested by the Job +func (job *Job) NodeSelector() map[string]string { if req := job.PodRequirements(); req != nil { return req.NodeSelector } return nil } -// Needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetAffinity() *v1.Affinity { +// Affinity returns the Affinity requested by the Job +func (job *Job) Affinity() *v1.Affinity { if req := job.PodRequirements(); req != nil { return req.Affinity } return nil } -// Needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetTolerations() []v1.Toleration { +// Tolerations returns the Tolerations requested by the Job +func (job *Job) Tolerations() []v1.Toleration { if req := job.PodRequirements(); req != nil { return req.Tolerations } return nil } -// Needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetResourceRequirements() v1.ResourceRequirements { +// ResourceRequirements returns the resource requirements of the Job +// EfficientResourceRequirements below is preferred +func (job *Job) ResourceRequirements() v1.ResourceRequirements { if req := job.PodRequirements(); req != nil { return req.ResourceRequirements } return v1.ResourceRequirements{} } -// Needed for compatibility with interfaces.LegacySchedulerJob -func (job *Job) GetQueueTtlSeconds() int64 { - return job.jobSchedulingInfo.QueueTtlSeconds +// EfficientResourceRequirements gets resource requirements as an efficient internaltypes.ResourceList +func (job *Job) EfficientResourceRequirements() internaltypes.ResourceList { + return job.resourceRequirements } +// PodRequirements returns the pod requirements of the Job func (job *Job) PodRequirements() *schedulerobjects.PodRequirements { return job.jobSchedulingInfo.GetPodRequirements() } -// GetPodRequirements is needed for compatibility with interfaces.LegacySchedulerJob. 
-func (job *Job) GetPodRequirements(_ map[string]types.PriorityClass) *schedulerobjects.PodRequirements { - return job.PodRequirements() -} - // Queued returns true if the job should be considered by the scheduler for assignment or false otherwise. func (job *Job) Queued() bool { return job.queued @@ -626,6 +633,21 @@ func (job *Job) HasRuns() bool { return job.activeRun != nil } +func (job *Job) ValidateResourceRequests() error { + pr := job.jobSchedulingInfo.GetPodRequirements() + if pr == nil { + return nil + } + + req := pr.ResourceRequirements.Requests + if req == nil { + return nil + } + + _, err := job.jobDb.resourceListFactory.FromJobResourceListFailOnUnknown(req) + return err +} + // WithNewRun creates a copy of the job with a new run on the given executor. func (job *Job) WithNewRun(executor string, nodeId, nodeName string, scheduledAtPriority int32) *Job { return job.WithUpdatedRun(job.jobDb.CreateRun( @@ -710,28 +732,6 @@ func (job *Job) RunById(id uuid.UUID) *JobRun { return job.runsById[id] } -// HasQueueTtlExpired returns true if the given job has reached its queueTtl expiry. -// Invariants: -// - job.created < `t` -func (job *Job) HasQueueTtlExpired() bool { - ttlSeconds := job.GetQueueTtlSeconds() - if ttlSeconds > 0 { - timeSeconds := time.Now().UTC().Unix() - - // job.Created is populated from the `Submitted` field in postgres, which is a UnixNano time hence the conversion. - createdSeconds := job.submittedTime / 1_000_000_000 - duration := timeSeconds - createdSeconds - return duration > ttlSeconds - } else { - return false - } -} - -// HasQueueTtlSet returns true if the given job has a queueTtl set. -func (job *Job) HasQueueTtlSet() bool { - return job.GetQueueTtlSeconds() > 0 -} - // WithJobset returns a copy of the job with the jobSet updated. func (job *Job) WithJobset(jobset string) *Job { j := copyJob(*job) @@ -753,20 +753,36 @@ func (job *Job) WithCreated(created int64) *Job { return j } +// WithValidated returns a copy of the job with the validated updated. +func (job *Job) WithValidated(validated bool) *Job { + j := copyJob(*job) + j.validated = validated + return j +} + +// Validated returns true if the job has been validated +func (job *Job) Validated() bool { + return job.validated +} + // WithJobSchedulingInfo returns a copy of the job with the job scheduling info updated. -func (job *Job) WithJobSchedulingInfo(jobSchedulingInfo *schedulerobjects.JobSchedulingInfo) *Job { +func (job *Job) WithJobSchedulingInfo(jobSchedulingInfo *schedulerobjects.JobSchedulingInfo) (*Job, error) { j := copyJob(*job) j.jobSchedulingInfo = jobSchedulingInfo j.ensureJobSchedulingInfoFieldsInitialised() // Changing the scheduling info invalidates the scheduling key stored with the job. 
- j.schedulingKey = interfaces.SchedulingKeyFromLegacySchedulerJob(j.jobDb.schedulingKeyGenerator, j) - return j + j.schedulingKey = SchedulingKeyFromJob(j.jobDb.schedulingKeyGenerator, j) + j.resourceRequirements = job.jobDb.getResourceRequirements(jobSchedulingInfo) + + return j, nil } func (job *Job) DeepCopy() *Job { - copiedSchedulingInfo := proto.Clone(job.JobSchedulingInfo()).(*schedulerobjects.JobSchedulingInfo) - j := job.WithJobSchedulingInfo(copiedSchedulingInfo) + j := copyJob(*job) + j.jobSchedulingInfo = proto.Clone(job.JobSchedulingInfo()).(*schedulerobjects.JobSchedulingInfo) + j.ensureJobSchedulingInfoFieldsInitialised() + j.schedulingKey = SchedulingKeyFromJob(j.jobDb.schedulingKeyGenerator, j) j.runsById = maps.Clone(j.runsById) for key, run := range j.runsById { @@ -779,7 +795,17 @@ func (job *Job) DeepCopy() *Job { return j } -// copyJob makes a copy of the job +// copyJob makes a shallow copy of the job func copyJob(j Job) *Job { return &j } + +func SchedulingKeyFromJob(skg *schedulerobjects.SchedulingKeyGenerator, job *Job) schedulerobjects.SchedulingKey { + return skg.Key( + job.NodeSelector(), + job.Affinity(), + job.Tolerations(), + job.ResourceRequirements().Requests, + job.PriorityClassName(), + ) +} diff --git a/internal/scheduler/jobdb/job_run_test.go b/internal/scheduler/jobdb/job_run_test.go index 497025a3c6f..f00ee4b0782 100644 --- a/internal/scheduler/jobdb/job_run_test.go +++ b/internal/scheduler/jobdb/job_run_test.go @@ -34,6 +34,7 @@ var ( TestDefaultPriorityClass, SchedulingKeyGenerator, stringinterner.New(1024), + MakeTestResourceListFactory(), ) scheduledAtPriority = int32(5) ) diff --git a/internal/scheduler/jobdb/job_test.go b/internal/scheduler/jobdb/job_test.go index 4cbd04cc6f3..7f768f9592f 100644 --- a/internal/scheduler/jobdb/job_test.go +++ b/internal/scheduler/jobdb/job_test.go @@ -7,6 +7,7 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -24,7 +25,7 @@ var jobSchedulingInfo = &schedulerobjects.JobSchedulingInfo{ }, } -var baseJob = jobDb.NewJob( +var baseJob, _ = jobDb.NewJob( "test-job", "test-jobSet", "test-queue", @@ -36,6 +37,8 @@ var baseJob = jobDb.NewJob( false, false, 3, + false, + []string{}, ) var baseRun = &JobRun{ @@ -52,12 +55,10 @@ var baseRun = &JobRun{ // Test methods that only have getters func TestJob_TestGetter(t *testing.T) { assert.Equal(t, baseJob.id, baseJob.Id()) - assert.Equal(t, baseJob.id, baseJob.GetId()) assert.Equal(t, baseJob.queue, baseJob.Queue()) - assert.Equal(t, baseJob.queue, baseJob.GetQueue()) assert.Equal(t, baseJob.submittedTime, baseJob.Created()) assert.Equal(t, jobSchedulingInfo, baseJob.JobSchedulingInfo()) - assert.Equal(t, baseJob.GetAnnotations(), map[string]string{ + assert.Equal(t, baseJob.Annotations(), map[string]string{ "foo": "bar", }) } @@ -285,6 +286,16 @@ func TestJob_TestWithJobset(t *testing.T) { assert.Equal(t, "fish", newJob.Jobset()) } +func TestJob_TestWithPriorityClass(t *testing.T) { + pc := types.PriorityClass{ + Priority: 100, + Preemptible: true, + } + newJob := baseJob.WithPriorityClass(pc) + assert.Equal(t, types.PriorityClass{Priority: 3, Preemptible: false}, baseJob.PriorityClass()) + assert.Equal(t, pc, newJob.PriorityClass()) +} + func TestJob_TestWithQueue(t *testing.T) { newJob := baseJob.WithQueue("fish") assert.Equal(t, "test-queue", baseJob.Queue()) @@ -298,9 +309,11 @@ func TestJob_TestWithCreated(t 
*testing.T) { } func TestJob_DeepCopy(t *testing.T) { - original := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, jobSchedulingInfo, true, 0, false, false, false, 3) + original, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, jobSchedulingInfo, true, 0, false, false, false, 3, false, []string{}) + assert.Nil(t, err) original = original.WithUpdatedRun(baseJobRun.DeepCopy()) - expected := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, jobSchedulingInfo, true, 0, false, false, false, 3) + expected, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, jobSchedulingInfo, true, 0, false, false, false, 3, false, []string{}) + assert.Nil(t, err) expected = expected.WithUpdatedRun(baseJobRun.DeepCopy()) result := original.DeepCopy() @@ -331,7 +344,7 @@ func TestJob_TestWithJobSchedulingInfo(t *testing.T) { }, }, } - newJob := baseJob.WithJobSchedulingInfo(newSchedInfo) + newJob := JobWithJobSchedulingInfo(baseJob, newSchedInfo) assert.Equal(t, jobSchedulingInfo, baseJob.JobSchedulingInfo()) assert.Equal(t, newSchedInfo, newJob.JobSchedulingInfo()) } @@ -352,13 +365,14 @@ func TestJobSchedulingInfoFieldsInitialised(t *testing.T) { assert.Nil(t, infoWithNilFields.GetPodRequirements().NodeSelector) assert.Nil(t, infoWithNilFields.GetPodRequirements().Annotations) - job := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, infoWithNilFieldsCopy, true, 0, false, false, false, 3) - assert.NotNil(t, job.GetNodeSelector()) - assert.NotNil(t, job.GetAnnotations()) + job, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, infoWithNilFieldsCopy, true, 0, false, false, false, 3, false, []string{}) + assert.Nil(t, err) + assert.NotNil(t, job.NodeSelector()) + assert.NotNil(t, job.Annotations()) // Copy again here, as the fields get mutated so we want a clean copy infoWithNilFieldsCopy2 := proto.Clone(infoWithNilFields).(*schedulerobjects.JobSchedulingInfo) - updatedJob := baseJob.WithJobSchedulingInfo(infoWithNilFieldsCopy2) - assert.NotNil(t, updatedJob.GetNodeSelector()) - assert.NotNil(t, updatedJob.GetAnnotations()) + updatedJob := JobWithJobSchedulingInfo(baseJob, infoWithNilFieldsCopy2) + assert.NotNil(t, updatedJob.NodeSelector()) + assert.NotNil(t, updatedJob.Annotations()) } diff --git a/internal/scheduler/jobdb/jobdb.go b/internal/scheduler/jobdb/jobdb.go index ee066dccc71..8541d599bed 100644 --- a/internal/scheduler/jobdb/jobdb.go +++ b/internal/scheduler/jobdb/jobdb.go @@ -9,24 +9,21 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "golang.org/x/exp/maps" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -var ( - emptyList = immutable.NewSortedSet[*Job](JobPriorityComparer{}) - emptyQueuedJobsByTtl = immutable.NewSortedSet[*Job](JobQueueTtlComparer{}) -) +var emptyList = immutable.NewSortedSet[*Job](JobPriorityComparer{}) type JobDb struct { jobsById *immutable.Map[string, *Job] jobsByRunId *immutable.Map[uuid.UUID, string] jobsByQueue map[string]immutable.SortedSet[*Job] - queuedJobsByTtl *immutable.SortedSet[*Job] + unvalidatedJobs *immutable.Set[*Job] // Configured priority classes. 
priorityClasses map[string]types.PriorityClass // Priority class assigned to jobs with a priorityClassName not in jobDb.priorityClasses. @@ -42,6 +39,8 @@ type JobDb struct { clock clock.PassiveClock // Used for generating job run ids. uuidProvider UUIDProvider + // Used to make efficient ResourceList types. + resourceListFactory *internaltypes.ResourceListFactory } // UUIDProvider is an interface used to mock UUID generation for tests. @@ -56,12 +55,14 @@ func (_ RealUUIDProvider) New() uuid.UUID { return uuid.New() } -func NewJobDb(priorityClasses map[string]types.PriorityClass, defaultPriorityClassName string, stringInterner *stringinterner.StringInterner) *JobDb { +func NewJobDb(priorityClasses map[string]types.PriorityClass, defaultPriorityClassName string, stringInterner *stringinterner.StringInterner, resourceListFactory *internaltypes.ResourceListFactory, +) *JobDb { return NewJobDbWithSchedulingKeyGenerator( priorityClasses, defaultPriorityClassName, schedulerobjects.NewSchedulingKeyGenerator(), stringInterner, + resourceListFactory, ) } @@ -70,23 +71,26 @@ func NewJobDbWithSchedulingKeyGenerator( defaultPriorityClassName string, skg *schedulerobjects.SchedulingKeyGenerator, stringInterner *stringinterner.StringInterner, + resourceListFactory *internaltypes.ResourceListFactory, ) *JobDb { defaultPriorityClass, ok := priorityClasses[defaultPriorityClassName] if !ok { // TODO(albin): Return an error instead. panic(fmt.Sprintf("unknown default priority class %s", defaultPriorityClassName)) } + unvalidatedJobs := immutable.NewSet[*Job](JobIdHasher{}) return &JobDb{ jobsById: immutable.NewMap[string, *Job](nil), jobsByRunId: immutable.NewMap[uuid.UUID, string](&UUIDHasher{}), jobsByQueue: map[string]immutable.SortedSet[*Job]{}, - queuedJobsByTtl: &emptyQueuedJobsByTtl, + unvalidatedJobs: &unvalidatedJobs, priorityClasses: priorityClasses, defaultPriorityClass: defaultPriorityClass, schedulingKeyGenerator: skg, stringInterner: stringInterner, clock: clock.RealClock{}, uuidProvider: RealUUIDProvider{}, + resourceListFactory: resourceListFactory, } } @@ -104,11 +108,12 @@ func (jobDb *JobDb) Clone() *JobDb { jobsById: jobDb.jobsById, jobsByRunId: jobDb.jobsByRunId, jobsByQueue: maps.Clone(jobDb.jobsByQueue), - queuedJobsByTtl: jobDb.queuedJobsByTtl, + unvalidatedJobs: jobDb.unvalidatedJobs, priorityClasses: jobDb.priorityClasses, defaultPriorityClass: jobDb.defaultPriorityClass, schedulingKeyGenerator: jobDb.schedulingKeyGenerator, stringInterner: jobDb.stringInterner, + resourceListFactory: jobDb.resourceListFactory, } } @@ -126,11 +131,16 @@ func (jobDb *JobDb) NewJob( cancelByJobSetRequested bool, cancelled bool, created int64, -) *Job { + validated bool, + pools []string, +) (*Job, error) { priorityClass, ok := jobDb.priorityClasses[schedulingInfo.PriorityClassName] if !ok { priorityClass = jobDb.defaultPriorityClass } + + rr := jobDb.getResourceRequirements(schedulingInfo) + job := &Job{ jobDb: jobDb, id: jobId, @@ -142,15 +152,32 @@ func (jobDb *JobDb) NewJob( requestedPriority: priority, submittedTime: created, jobSchedulingInfo: jobDb.internJobSchedulingInfoStrings(schedulingInfo), + resourceRequirements: rr, priorityClass: priorityClass, cancelRequested: cancelRequested, cancelByJobSetRequested: cancelByJobSetRequested, cancelled: cancelled, + validated: validated, runsById: map[uuid.UUID]*JobRun{}, + pools: pools, } job.ensureJobSchedulingInfoFieldsInitialised() - job.schedulingKey = interfaces.SchedulingKeyFromLegacySchedulerJob(jobDb.schedulingKeyGenerator, job) - return job + 
job.schedulingKey = SchedulingKeyFromJob(jobDb.schedulingKeyGenerator, job) + return job, nil +} + +func (jobDb *JobDb) getResourceRequirements(schedulingInfo *schedulerobjects.JobSchedulingInfo) internaltypes.ResourceList { + pr := schedulingInfo.GetPodRequirements() + if pr == nil { + return internaltypes.ResourceList{} + } + + req := pr.ResourceRequirements.Requests + if req == nil { + return internaltypes.ResourceList{} + } + + return jobDb.resourceListFactory.FromJobResourceListIgnoreUnknown(schedulerobjects.ResourceListFromV1ResourceList(req).Resources) } func (jobDb *JobDb) internJobSchedulingInfoStrings(info *schedulerobjects.JobSchedulingInfo) *schedulerobjects.JobSchedulingInfo { @@ -179,7 +206,7 @@ func (jobDb *JobDb) ReadTxn() *Txn { jobsById: jobDb.jobsById, jobsByRunId: jobDb.jobsByRunId, jobsByQueue: jobDb.jobsByQueue, - queuedJobsByTtl: jobDb.queuedJobsByTtl, + unvalidatedJobs: jobDb.unvalidatedJobs, active: true, jobDb: jobDb, } @@ -197,7 +224,7 @@ func (jobDb *JobDb) WriteTxn() *Txn { jobsById: jobDb.jobsById, jobsByRunId: jobDb.jobsByRunId, jobsByQueue: maps.Clone(jobDb.jobsByQueue), - queuedJobsByTtl: jobDb.queuedJobsByTtl, + unvalidatedJobs: jobDb.unvalidatedJobs, active: true, jobDb: jobDb, } @@ -216,9 +243,8 @@ type Txn struct { jobsByRunId *immutable.Map[uuid.UUID, string] // Queued jobs for each queue. Stored in the order in which they should be scheduled. jobsByQueue map[string]immutable.SortedSet[*Job] - // Queued jobs for each queue ordered by remaining time-to-live. - // TODO: The ordering is wrong. Since we call time.Now() in the compare function. - queuedJobsByTtl *immutable.SortedSet[*Job] + // Jobs that require submit checking + unvalidatedJobs *immutable.Set[*Job] // The jobDb from which this transaction was created. jobDb *JobDb // Set to false when this transaction is either committed or aborted. @@ -235,7 +261,8 @@ func (txn *Txn) Commit() { txn.jobDb.jobsById = txn.jobsById txn.jobDb.jobsByRunId = txn.jobsByRunId txn.jobDb.jobsByQueue = txn.jobsByQueue - txn.jobDb.queuedJobsByTtl = txn.queuedJobsByTtl + txn.jobDb.unvalidatedJobs = txn.unvalidatedJobs + txn.active = false } @@ -341,9 +368,8 @@ func (txn *Txn) Upsert(jobs []*Job) error { hasJobs := txn.jobsById.Len() > 0 - // First, delete any jobs to be upserted from the set of queued jobs. - // This to ensure jobs that are no longer queued do not appear in this set. - // Jobs that are still queued will be re-inserted later. + // First, delete any jobs to be upserted from the sets of queued and unvalidated jobs + // We will replace these jobs later if they are still queued if hasJobs { for _, job := range jobs { existingJob, ok := txn.jobsById.Get(job.id) @@ -352,16 +378,17 @@ func (txn *Txn) Upsert(jobs []*Job) error { if ok { txn.jobsByQueue[existingJob.queue] = existingQueue.Delete(existingJob) } - - newQueuedJobsByTtl := txn.queuedJobsByTtl.Delete(existingJob) - txn.queuedJobsByTtl = &newQueuedJobsByTtl + if !existingJob.Validated() { + newUnvalidatedJobs := txn.unvalidatedJobs.Delete(existingJob) + txn.unvalidatedJobs = &newUnvalidatedJobs + } } } } // Now need to insert jobs, runs and queuedJobs. This can be done in parallel. 
wg := sync.WaitGroup{} - wg.Add(3) + wg.Add(4) // jobs go func() { @@ -412,14 +439,21 @@ func (txn *Txn) Upsert(jobs []*Job) error { } newQueue = newQueue.Add(job) txn.jobsByQueue[job.queue] = newQueue + } + } + }() - if job.HasQueueTtlSet() { - queuedJobsByTtl := txn.queuedJobsByTtl.Add(job) - txn.queuedJobsByTtl = &queuedJobsByTtl - } + // Unvalidated jobs + go func() { + defer wg.Done() + for _, job := range jobs { + if !job.Validated() { + unvalidatedJobs := txn.unvalidatedJobs.Add(job) + txn.unvalidatedJobs = &unvalidatedJobs } } }() + wg.Wait() return nil } @@ -457,13 +491,12 @@ func (txn *Txn) QueuedJobs(queue string) *immutable.SortedSetIterator[*Job] { } } -// QueuedJobsByTtl returns an iterator for jobs ordered by queue ttl time - the closest to expiry first -func (txn *Txn) QueuedJobsByTtl() *immutable.SortedSetIterator[*Job] { - return txn.queuedJobsByTtl.Iterator() +// UnvalidatedJobs returns an iterator for jobs that have not yet been validated +func (txn *Txn) UnvalidatedJobs() *immutable.SetIterator[*Job] { + return txn.unvalidatedJobs.Iterator() } // GetAll returns all jobs in the database. -// The Jobs returned by this function *must not* be subsequently modified func (txn *Txn) GetAll() []*Job { allJobs := make([]*Job, 0, txn.jobsById.Len()) iter := txn.jobsById.Iterator() @@ -500,12 +533,8 @@ func (txn *Txn) delete(jobId string) { newQueue := queue.Delete(job) txn.jobsByQueue[job.queue] = newQueue } - - // We only add these jobs into the collection if it has a queueTtl set, hence only remove if this is set. - if job.HasQueueTtlSet() { - newQueuedJobsByExpiry := txn.queuedJobsByTtl.Delete(job) - txn.queuedJobsByTtl = &newQueuedJobsByExpiry - } + newUnvalidatedJobs := txn.unvalidatedJobs.Delete(job) + txn.unvalidatedJobs = &newUnvalidatedJobs } } diff --git a/internal/scheduler/jobdb/jobdb_test.go b/internal/scheduler/jobdb/jobdb_test.go index d0fb9756eb7..4919456a36a 100644 --- a/internal/scheduler/jobdb/jobdb_test.go +++ b/internal/scheduler/jobdb/jobdb_test.go @@ -2,6 +2,7 @@ package jobdb import ( "math/rand" + "sort" "testing" "github.com/gogo/protobuf/proto" @@ -17,7 +18,6 @@ import ( "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" - "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -29,6 +29,7 @@ func NewTestJobDb() *JobDb { }, "foo", stringinterner.New(1024), + TestResourceListFactory, ) } @@ -72,6 +73,28 @@ func TestJobDb_TestGetById(t *testing.T) { assert.Nil(t, txn.GetById(util.NewULID())) } +func TestJobDb_TestGetUnvalidated(t *testing.T) { + jobDb := NewTestJobDb() + job1 := newJob().WithValidated(false) + job2 := newJob().WithValidated(true) + job3 := newJob().WithValidated(false) + txn := jobDb.WriteTxn() + + err := txn.Upsert([]*Job{job1, job2, job3}) + require.NoError(t, err) + + expected := []*Job{job1, job3} + + var actual []*Job + it := txn.UnvalidatedJobs() + for job, _ := it.Next(); job != nil; job, _ = it.Next() { + actual = append(actual, job) + } + sort.SliceStable(actual, func(i, j int) bool { return actual[i].id < actual[j].id }) + sort.SliceStable(expected, func(i, j int) bool { return expected[i].id < expected[j].id }) + assert.Equal(t, expected, actual) +} + func TestJobDb_TestGetByRunId(t *testing.T) { jobDb := NewTestJobDb() job1 := newJob().WithNewRun("executor", "nodeId", "nodeName", 5) @@ -122,7 +145,7 @@ func 
TestJobDb_TestQueuedJobs(t *testing.T) { require.NoError(t, err) collect := func() []*Job { retrieved := make([]*Job, 0) - iter := txn.QueuedJobs(jobs[0].GetQueue()) + iter := txn.QueuedJobs(jobs[0].Queue()) for !iter.Done() { j, _ := iter.Next() retrieved = append(retrieved, j) @@ -246,11 +269,9 @@ func TestJobDb_SchedulingKeyIsPopulated(t *testing.T) { }, } jobDb := NewTestJobDb() - job := jobDb.NewJob("jobId", "jobSet", "queue", 1, jobSchedulingInfo, false, 0, false, false, false, 2) - - actualSchedulingKey, ok := job.GetSchedulingKey() - require.True(t, ok) - assert.Equal(t, interfaces.SchedulingKeyFromLegacySchedulerJob(jobDb.schedulingKeyGenerator, job), actualSchedulingKey) + job, err := jobDb.NewJob("jobId", "jobSet", "queue", 1, jobSchedulingInfo, false, 0, false, false, false, 2, false, []string{}) + assert.Nil(t, err) + assert.Equal(t, SchedulingKeyFromJob(jobDb.schedulingKeyGenerator, job), job.SchedulingKey()) } func TestJobDb_SchedulingKey(t *testing.T) { @@ -283,14 +304,14 @@ func TestJobDb_SchedulingKey(t *testing.T) { PreemptionPolicy: "abc", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("2"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -314,14 +335,14 @@ func TestJobDb_SchedulingKey(t *testing.T) { PreemptionPolicy: "abc", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("2"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -344,14 +365,14 @@ func TestJobDb_SchedulingKey(t *testing.T) { PreemptionPolicy: "abc", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("2"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -371,14 +392,14 @@ func TestJobDb_SchedulingKey(t *testing.T) { PreemptionPolicy: "abcdef", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("2"), - 
"memory": resource.MustParse("2"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -401,14 +422,14 @@ func TestJobDb_SchedulingKey(t *testing.T) { PreemptionPolicy: "abc", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -428,14 +449,14 @@ func TestJobDb_SchedulingKey(t *testing.T) { PreemptionPolicy: "abcdef", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("4"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("4"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -457,9 +478,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -478,9 +499,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 2, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -502,9 +523,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -523,10 +544,10 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), - "foo": resource.MustParse("0"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), + "foo": resource.MustParse("0"), }, }, }, @@ -548,9 +569,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": 
resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -570,9 +591,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -593,9 +614,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -614,9 +635,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -650,9 +671,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -671,9 +692,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -694,9 +715,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -715,9 +736,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -738,9 +759,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -759,9 +780,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": 
resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -782,9 +803,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -803,9 +824,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -826,9 +847,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -847,9 +868,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -871,9 +892,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("4"), - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), }, }, }, @@ -892,9 +913,9 @@ func TestJobDb_SchedulingKey(t *testing.T) { Priority: 1, ResourceRequirements: v1.ResourceRequirements{ Requests: map[v1.ResourceName]resource.Quantity{ - "memory": resource.MustParse("5"), - "gpu": resource.MustParse("6"), - "cpu": resource.MustParse("4"), + "memory": resource.MustParse("5"), + "nvidia.com/gpu": resource.MustParse("6"), + "cpu": resource.MustParse("4"), }, }, }, @@ -1237,20 +1258,20 @@ func TestJobDb_SchedulingKey(t *testing.T) { jobSchedulingInfoA := proto.Clone(jobSchedulingInfo).(*schedulerobjects.JobSchedulingInfo) jobSchedulingInfoA.PriorityClassName = tc.priorityClassNameA jobSchedulingInfoA.ObjectRequirements[0].Requirements = &schedulerobjects.ObjectRequirements_PodRequirements{PodRequirements: tc.podRequirementsA} - jobA := baseJob.WithJobSchedulingInfo(jobSchedulingInfoA) + jobA := JobWithJobSchedulingInfo(baseJob, jobSchedulingInfoA) jobSchedulingInfoB := proto.Clone(jobSchedulingInfo).(*schedulerobjects.JobSchedulingInfo) jobSchedulingInfoB.PriorityClassName = tc.priorityClassNameB jobSchedulingInfoB.ObjectRequirements[0].Requirements = &schedulerobjects.ObjectRequirements_PodRequirements{PodRequirements: tc.podRequirementsB} - jobB := baseJob.WithJobSchedulingInfo(jobSchedulingInfoB) + jobB := JobWithJobSchedulingInfo(baseJob, 
jobSchedulingInfoB) - schedulingKeyA := interfaces.SchedulingKeyFromLegacySchedulerJob(skg, jobA) - schedulingKeyB := interfaces.SchedulingKeyFromLegacySchedulerJob(skg, jobB) + schedulingKeyA := SchedulingKeyFromJob(skg, jobA) + schedulingKeyB := SchedulingKeyFromJob(skg, jobB) // Generate the keys several times to check their consistency. for i := 1; i < 10; i++ { - assert.Equal(t, interfaces.SchedulingKeyFromLegacySchedulerJob(skg, jobA), schedulingKeyA) - assert.Equal(t, interfaces.SchedulingKeyFromLegacySchedulerJob(skg, jobB), schedulingKeyB) + assert.Equal(t, SchedulingKeyFromJob(skg, jobA), schedulingKeyA) + assert.Equal(t, SchedulingKeyFromJob(skg, jobB), schedulingKeyB) } if tc.equal { diff --git a/internal/scheduler/jobdb/reconciliation.go b/internal/scheduler/jobdb/reconciliation.go index 7052f3cea64..07e8cd464e6 100644 --- a/internal/scheduler/jobdb/reconciliation.go +++ b/internal/scheduler/jobdb/reconciliation.go @@ -112,6 +112,9 @@ func (jobDb *JobDb) reconcileJobDifferences(job *Job, jobRepoJob *database.Job, } else if job != nil && jobRepoJob == nil { // No direct updates to the job; just process any updated runs below. } else if job != nil && jobRepoJob != nil { + if jobRepoJob.Validated && !job.Validated() { + job = job.WithValidated(true).WithPools(jobRepoJob.Pools) + } if jobRepoJob.CancelRequested && !job.CancelRequested() { job = job.WithCancelRequested(true) } @@ -136,7 +139,11 @@ func (jobDb *JobDb) reconcileJobDifferences(job *Job, jobRepoJob *database.Job, err = errors.Wrapf(err, "error unmarshalling scheduling info for job %s", jobRepoJob.JobID) return } - job = job.WithJobSchedulingInfo(schedulingInfo) + job, err = job.WithJobSchedulingInfo(schedulingInfo) + if err != nil { + err = errors.Wrapf(err, "error unmarshalling scheduling info for job %s", jobRepoJob.JobID) + return + } } if jobRepoJob.QueuedVersion > job.QueuedVersion() { job = job.WithQueuedVersion(jobRepoJob.QueuedVersion) @@ -255,7 +262,7 @@ func (jobDb *JobDb) schedulerJobFromDatabaseJob(dbJob *database.Job) (*Job, erro } } - job := jobDb.NewJob( + job, err := jobDb.NewJob( dbJob.JobID, dbJob.JobSet, dbJob.Queue, @@ -267,7 +274,13 @@ func (jobDb *JobDb) schedulerJobFromDatabaseJob(dbJob *database.Job) (*Job, erro dbJob.CancelByJobsetRequested, dbJob.Cancelled, dbJob.Submitted, + dbJob.Validated, + dbJob.Pools, ) + if err != nil { + return nil, err + } + if dbJob.Failed { // TODO(albin): Let's make this an argument to NewJob. Even better: have the state as an enum argument. 
job = job.WithFailed(dbJob.Failed) diff --git a/internal/scheduler/jobdb/test_utils.go b/internal/scheduler/jobdb/test_utils.go new file mode 100644 index 00000000000..ea2770bf594 --- /dev/null +++ b/internal/scheduler/jobdb/test_utils.go @@ -0,0 +1,47 @@ +package jobdb + +import ( + "k8s.io/apimachinery/pkg/api/resource" + + schedulerconfiguration "github.com/armadaproject/armada/internal/scheduler/configuration" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" + "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" +) + +var TestResourceListFactory = MakeTestResourceListFactory() + +func MakeTestResourceListFactory() *internaltypes.ResourceListFactory { + result, _ := internaltypes.MakeResourceListFactory(GetTestSupportedResourceTypes()) + return result +} + +func GetTestSupportedResourceTypes() []schedulerconfiguration.ResourceType { + return []schedulerconfiguration.ResourceType{ + {Name: "memory", Resolution: resource.MustParse("1")}, + {Name: "cpu", Resolution: resource.MustParse("1m")}, + {Name: "nvidia.com/gpu", Resolution: resource.MustParse("1m")}, + {Name: "foo", Resolution: resource.MustParse("1m")}, + } +} + +func WithJobDbJobPodRequirements(job *Job, reqs *schedulerobjects.PodRequirements) *Job { + return JobWithJobSchedulingInfo(job, &schedulerobjects.JobSchedulingInfo{ + PriorityClassName: job.JobSchedulingInfo().PriorityClassName, + SubmitTime: job.JobSchedulingInfo().SubmitTime, + ObjectRequirements: []*schedulerobjects.ObjectRequirements{ + { + Requirements: &schedulerobjects.ObjectRequirements_PodRequirements{ + PodRequirements: reqs, + }, + }, + }, + }) +} + +func JobWithJobSchedulingInfo(job *Job, jobSchedulingInfo *schedulerobjects.JobSchedulingInfo) *Job { + j, err := job.WithJobSchedulingInfo(jobSchedulingInfo) + if err != nil { + panic(err) + } + return j +} diff --git a/internal/scheduler/jobiteration.go b/internal/scheduler/jobiteration.go index f6733c26e39..8f356db66e7 100644 --- a/internal/scheduler/jobiteration.go +++ b/internal/scheduler/jobiteration.go @@ -6,10 +6,10 @@ import ( "golang.org/x/exp/slices" "github.com/armadaproject/armada/internal/common/armadacontext" + armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/common/util" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/jobdb" ) type JobIterator interface { @@ -17,8 +17,8 @@ type JobIterator interface { } type JobRepository interface { - GetQueueJobIds(queueName string) ([]string, error) - GetExistingJobsByIds(ids []string) ([]interfaces.LegacySchedulerJob, error) + GetQueueJobIds(queueName string) []string + GetExistingJobsByIds(ids []string) []*jobdb.Job } type InMemoryJobIterator struct { @@ -60,9 +60,9 @@ func (repo *InMemoryJobRepository) EnqueueMany(jctxs []*schedulercontext.JobSche defer repo.mu.Unlock() updatedQueues := make(map[string]bool) for _, jctx := range jctxs { - queue := jctx.Job.GetQueue() + queue := jctx.Job.Queue() repo.jctxsByQueue[queue] = append(repo.jctxsByQueue[queue], jctx) - repo.jctxsById[jctx.Job.GetId()] = jctx + repo.jctxsById[jctx.Job.Id()] = jctx updatedQueues[queue] = true } for queue := range updatedQueues { @@ -77,27 +77,25 @@ func (repo *InMemoryJobRepository) sortQueue(queue string) { }) } -// Should only be used in testing. 
-func (repo *InMemoryJobRepository) GetQueueJobIds(queue string) ([]string, error) { - return util.Map( +func (repo *InMemoryJobRepository) GetQueueJobIds(queue string) []string { + return armadaslices.Map( repo.jctxsByQueue[queue], func(jctx *schedulercontext.JobSchedulingContext) string { - return jctx.Job.GetId() + return jctx.Job.Id() }, - ), nil + ) } -// Should only be used in testing. -func (repo *InMemoryJobRepository) GetExistingJobsByIds(jobIds []string) ([]interfaces.LegacySchedulerJob, error) { +func (repo *InMemoryJobRepository) GetExistingJobsByIds(jobIds []string) []*jobdb.Job { repo.mu.Lock() defer repo.mu.Unlock() - rv := make([]interfaces.LegacySchedulerJob, 0, len(jobIds)) + rv := make([]*jobdb.Job, 0, len(jobIds)) for _, jobId := range jobIds { if jctx, ok := repo.jctxsById[jobId]; ok { rv = append(rv, jctx.Job) } } - return rv, nil + return rv } func (repo *InMemoryJobRepository) GetJobIterator(queue string) JobIterator { @@ -107,85 +105,35 @@ func (repo *InMemoryJobRepository) GetJobIterator(queue string) JobIterator { } // QueuedJobsIterator is an iterator over all jobs in a queue. -// It loads jobs asynchronously in batches from the underlying database. -// This is necessary for good performance when jobs are stored in Redis. type QueuedJobsIterator struct { - ctx *armadacontext.Context - err error - c chan interfaces.LegacySchedulerJob + repo JobRepository + jobIds []string priorityClasses map[string]types.PriorityClass + idx int + ctx *armadacontext.Context } -func NewQueuedJobsIterator(ctx *armadacontext.Context, queue string, repo JobRepository, priorityClasses map[string]types.PriorityClass) (*QueuedJobsIterator, error) { - batchSize := 16 - g, ctx := armadacontext.ErrGroup(ctx) - it := &QueuedJobsIterator{ - ctx: ctx, - c: make(chan interfaces.LegacySchedulerJob, 2*batchSize), // 2x batchSize to load one batch async. +func NewQueuedJobsIterator(ctx *armadacontext.Context, queue string, repo JobRepository, priorityClasses map[string]types.PriorityClass) *QueuedJobsIterator { + return &QueuedJobsIterator{ + jobIds: repo.GetQueueJobIds(queue), + repo: repo, priorityClasses: priorityClasses, + ctx: ctx, } - - jobIds, err := repo.GetQueueJobIds(queue) - if err != nil { - it.err = err - return nil, err - } - g.Go(func() error { return queuedJobsIteratorLoader(ctx, jobIds, it.c, batchSize, repo) }) - - return it, nil } func (it *QueuedJobsIterator) Next() (*schedulercontext.JobSchedulingContext, error) { - // Once this function has returned error, - // it will return this error on every invocation. - if it.err != nil { - return nil, it.err - } - - // Get one job that was loaded asynchronously. select { case <-it.ctx.Done(): - it.err = it.ctx.Err() // Return an error if called again. - return nil, it.err - case job, ok := <-it.c: - if !ok { + return nil, it.ctx.Err() + default: + if it.idx >= len(it.jobIds) { return nil, nil } - return schedulercontext.JobSchedulingContextFromJob(it.priorityClasses, job), nil - } -} - -// queuedJobsIteratorLoader loads jobs from Redis lazily. -// Used with QueuedJobsIterator. 
-func queuedJobsIteratorLoader( - ctx *armadacontext.Context, - jobIds []string, - ch chan interfaces.LegacySchedulerJob, - batchSize int, - repo JobRepository, -) error { - defer close(ch) - batch := make([]string, batchSize) - for i, jobId := range jobIds { - batch[i%len(batch)] = jobId - if (i+1)%len(batch) == 0 || i == len(jobIds)-1 { - jobs, err := repo.GetExistingJobsByIds(batch[:i%len(batch)+1]) - if err != nil { - return err - } - for _, job := range jobs { - if job == nil { - continue - } - select { - case <-ctx.Done(): - return ctx.Err() - case ch <- job: - } - } - } + job := it.repo.GetExistingJobsByIds([]string{it.jobIds[it.idx]}) + it.idx++ + return schedulercontext.JobSchedulingContextFromJob(job[0]), nil } - return nil } // MultiJobsIterator chains several JobIterators together in the order provided. @@ -204,12 +152,14 @@ func (it *MultiJobsIterator) Next() (*schedulercontext.JobSchedulingContext, err if it.i >= len(it.its) { return nil, nil } - if v, err := it.its[it.i].Next(); err != nil { + v, err := it.its[it.i].Next() + if err != nil { return nil, err - } else if v == nil { + } + if v == nil { it.i++ return it.Next() } else { - return v, nil + return v, err } } diff --git a/internal/scheduler/jobiteration_test.go b/internal/scheduler/jobiteration_test.go index d555e4a014a..d714a01531c 100644 --- a/internal/scheduler/jobiteration_test.go +++ b/internal/scheduler/jobiteration_test.go @@ -7,68 +7,34 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" - "github.com/armadaproject/armada/pkg/api" ) func TestInMemoryJobRepository(t *testing.T) { - T := time.Now() - jobs := []*api.Job{ - { - Queue: "A", - Id: "3", - Priority: 1, - Created: T.Add(3 * time.Second), - PodSpec: &v1.PodSpec{}, - }, - { - Queue: "A", - Id: "1", - Priority: 1, - Created: T.Add(1 * time.Second), - PodSpec: &v1.PodSpec{}, - }, - { - Queue: "A", - Id: "2", - Priority: 1, - Created: T.Add(2 * time.Second), - PodSpec: &v1.PodSpec{}, - }, - { - Queue: "A", - Id: "5", - Priority: 3, - PodSpec: &v1.PodSpec{}, - }, - { - Queue: "A", - Id: "0", - Priority: 0, - PodSpec: &v1.PodSpec{}, - }, - { - Queue: "A", - Id: "4", - Priority: 2, - PodSpec: &v1.PodSpec{}, - }, + jobs := []*jobdb.Job{ + testfixtures.TestJob("A", util.ULID(), "armada-default", nil).WithCreated(3).WithPriority(1), + testfixtures.TestJob("A", util.ULID(), "armada-default", nil).WithCreated(1).WithPriority(1), + testfixtures.TestJob("A", util.ULID(), "armada-default", nil).WithCreated(2).WithPriority(1), + testfixtures.TestJob("A", util.ULID(), "armada-default", nil).WithCreated(0).WithPriority(3), + testfixtures.TestJob("A", util.ULID(), "armada-default", nil).WithCreated(0).WithPriority(0), + testfixtures.TestJob("A", util.ULID(), "armada-default", nil).WithCreated(0).WithPriority(2), } jctxs := make([]*schedulercontext.JobSchedulingContext, len(jobs)) for i, job := range jobs { - jctxs[i] = &schedulercontext.JobSchedulingContext{Job: job} + jctxs[i] = &schedulercontext.JobSchedulingContext{Job: job, ResourceRequirements: 
job.EfficientResourceRequirements()} } repo := NewInMemoryJobRepository() repo.EnqueueMany(jctxs) - expected := []string{"0", "1", "2", "3", "4", "5"} - actual := make([]string, 0) + expected := []*jobdb.Job{ + jobs[4], jobs[1], jobs[2], jobs[0], jobs[5], jobs[3], + } + actual := make([]*jobdb.Job, 0) it := repo.GetJobIterator("A") for { jctx, err := it.Next() @@ -76,7 +42,7 @@ func TestInMemoryJobRepository(t *testing.T) { if jctx == nil { break } - actual = append(actual, jctx.Job.GetId()) + actual = append(actual, jctx.Job) } assert.Equal(t, expected, actual) } @@ -85,25 +51,20 @@ func TestMultiJobsIterator_TwoQueues(t *testing.T) { repo := newMockJobRepository() expected := make([]string, 0) for _, req := range testfixtures.N1CpuPodReqs("A", 0, 5) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) - job.Queue = "A" + job := jobFromPodSpec("A", req) repo.Enqueue(job) - expected = append(expected, job.Id) + expected = append(expected, job.Id()) } for _, req := range testfixtures.N1CpuPodReqs("B", 0, 5) { - job := apiJobFromPodSpec("B", podSpecFromPodRequirements(req)) - job.Queue = "B" + job := jobFromPodSpec("B", req) repo.Enqueue(job) - expected = append(expected, job.Id) + expected = append(expected, job.Id()) } ctx := armadacontext.Background() its := make([]JobIterator, 3) for i, queue := range []string{"A", "B", "C"} { - it, err := NewQueuedJobsIterator(ctx, queue, repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, queue, repo, nil) its[i] = it } it := NewMultiJobsIterator(its...) @@ -115,7 +76,7 @@ func TestMultiJobsIterator_TwoQueues(t *testing.T) { if jctx == nil { break } - actual = append(actual, jctx.Job.GetId()) + actual = append(actual, jctx.Job.Id()) } assert.Equal(t, expected, actual) v, err := it.Next() @@ -127,17 +88,12 @@ func TestQueuedJobsIterator_OneQueue(t *testing.T) { repo := newMockJobRepository() expected := make([]string, 0) for _, req := range testfixtures.N1CpuPodReqs("A", 0, 10) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) - job.Queue = "A" + job := jobFromPodSpec("A", req) repo.Enqueue(job) - expected = append(expected, job.Id) + expected = append(expected, job.Id()) } - ctx := armadacontext.Background() - it, err := NewQueuedJobsIterator(ctx, "A", repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, "A", repo, nil) actual := make([]string, 0) for { jctx, err := it.Next() @@ -145,7 +101,7 @@ func TestQueuedJobsIterator_OneQueue(t *testing.T) { if jctx == nil { break } - actual = append(actual, jctx.Job.GetId()) + actual = append(actual, jctx.Job.Id()) } assert.Equal(t, expected, actual) } @@ -154,17 +110,12 @@ func TestQueuedJobsIterator_ExceedsBufferSize(t *testing.T) { repo := newMockJobRepository() expected := make([]string, 0) for _, req := range testfixtures.N1CpuPodReqs("A", 0, 17) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) - job.Queue = "A" + job := jobFromPodSpec("A", req) repo.Enqueue(job) - expected = append(expected, job.Id) + expected = append(expected, job.Id()) } - ctx := armadacontext.Background() - it, err := NewQueuedJobsIterator(ctx, "A", repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, "A", repo, nil) actual := make([]string, 0) for { jctx, err := it.Next() @@ -172,7 +123,7 @@ func TestQueuedJobsIterator_ExceedsBufferSize(t *testing.T) { if jctx == nil { break } - actual = append(actual, jctx.Job.GetId()) + actual = append(actual, jctx.Job.Id()) } 
assert.Equal(t, expected, actual) } @@ -181,17 +132,12 @@ func TestQueuedJobsIterator_ManyJobs(t *testing.T) { repo := newMockJobRepository() expected := make([]string, 0) for _, req := range testfixtures.N1CpuPodReqs("A", 0, 113) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) - job.Queue = "A" + job := jobFromPodSpec("A", req) repo.Enqueue(job) - expected = append(expected, job.Id) + expected = append(expected, job.Id()) } - ctx := armadacontext.Background() - it, err := NewQueuedJobsIterator(ctx, "A", repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, "A", repo, nil) actual := make([]string, 0) for { jctx, err := it.Next() @@ -199,7 +145,7 @@ func TestQueuedJobsIterator_ManyJobs(t *testing.T) { if jctx == nil { break } - actual = append(actual, jctx.Job.GetId()) + actual = append(actual, jctx.Job.Id()) } assert.Equal(t, expected, actual) } @@ -208,21 +154,17 @@ func TestCreateQueuedJobsIterator_TwoQueues(t *testing.T) { repo := newMockJobRepository() expected := make([]string, 0) for _, req := range testfixtures.N1CpuPodReqs("A", 0, 10) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) + job := jobFromPodSpec("A", req) repo.Enqueue(job) - expected = append(expected, job.Id) + expected = append(expected, job.Id()) } for _, req := range testfixtures.N1CpuPodReqs("B", 0, 10) { - job := apiJobFromPodSpec("B", podSpecFromPodRequirements(req)) + job := jobFromPodSpec("B", req) repo.Enqueue(job) } - ctx := armadacontext.Background() - it, err := NewQueuedJobsIterator(ctx, "A", repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, "A", repo, nil) actual := make([]string, 0) for { jctx, err := it.Next() @@ -230,7 +172,7 @@ func TestCreateQueuedJobsIterator_TwoQueues(t *testing.T) { if jctx == nil { break } - actual = append(actual, jctx.Job.GetId()) + actual = append(actual, jctx.Job.Id()) } assert.Equal(t, expected, actual) } @@ -238,18 +180,14 @@ func TestCreateQueuedJobsIterator_TwoQueues(t *testing.T) { func TestCreateQueuedJobsIterator_RespectsTimeout(t *testing.T) { repo := newMockJobRepository() for _, req := range testfixtures.N1CpuPodReqs("A", 0, 10) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) - job.Queue = "A" + job := jobFromPodSpec("A", req) repo.Enqueue(job) } ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Millisecond) time.Sleep(20 * time.Millisecond) defer cancel() - it, err := NewQueuedJobsIterator(ctx, "A", repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, "A", repo, nil) job, err := it.Next() assert.Nil(t, job) assert.ErrorIs(t, err, context.DeadlineExceeded) @@ -263,20 +201,13 @@ func TestCreateQueuedJobsIterator_RespectsTimeout(t *testing.T) { func TestCreateQueuedJobsIterator_NilOnEmpty(t *testing.T) { repo := newMockJobRepository() for _, req := range testfixtures.N1CpuPodReqs("A", 0, 10) { - job := apiJobFromPodSpec("A", podSpecFromPodRequirements(req)) - job.Queue = "A" + job := jobFromPodSpec("A", req) repo.Enqueue(job) } - ctx := armadacontext.Background() - it, err := NewQueuedJobsIterator(ctx, "A", repo, nil) - if !assert.NoError(t, err) { - return - } + it := NewQueuedJobsIterator(ctx, "A", repo, nil) for job, err := it.Next(); job != nil; job, err = it.Next() { - if !assert.NoError(t, err) { - return - } + require.NoError(t, err) } job, err := it.Next() assert.Nil(t, job) @@ -285,8 +216,8 @@ func TestCreateQueuedJobsIterator_NilOnEmpty(t *testing.T) { // TODO: 
Deprecate in favour of InMemoryRepo. type mockJobRepository struct { - jobsByQueue map[string][]*api.Job - jobsById map[string]*api.Job + jobsByQueue map[string][]*jobdb.Job + jobsById map[string]*jobdb.Job // Ids of all jobs hat were leased to an executor. leasedJobs map[string]bool getQueueJobIdsDelay time.Duration @@ -294,82 +225,52 @@ type mockJobRepository struct { func newMockJobRepository() *mockJobRepository { return &mockJobRepository{ - jobsByQueue: make(map[string][]*api.Job), - jobsById: make(map[string]*api.Job), + jobsByQueue: make(map[string][]*jobdb.Job), + jobsById: make(map[string]*jobdb.Job), leasedJobs: make(map[string]bool), } } -func (repo *mockJobRepository) EnqueueMany(jobs []*api.Job) { +func (repo *mockJobRepository) EnqueueMany(jobs []*jobdb.Job) { for _, job := range jobs { repo.Enqueue(job) } } -func (repo *mockJobRepository) Enqueue(job *api.Job) { - repo.jobsByQueue[job.Queue] = append(repo.jobsByQueue[job.Queue], job) - repo.jobsById[job.Id] = job +func (repo *mockJobRepository) Enqueue(job *jobdb.Job) { + repo.jobsByQueue[job.Queue()] = append(repo.jobsByQueue[job.Queue()], job) + repo.jobsById[job.Id()] = job } -func (repo *mockJobRepository) GetJobIterator(ctx *armadacontext.Context, queue string) (JobIterator, error) { +func (repo *mockJobRepository) GetJobIterator(ctx *armadacontext.Context, queue string) JobIterator { return NewQueuedJobsIterator(ctx, queue, repo, nil) } -func (repo *mockJobRepository) GetQueueJobIds(queue string) ([]string, error) { +func (repo *mockJobRepository) GetQueueJobIds(queue string) []string { time.Sleep(repo.getQueueJobIdsDelay) if jobs, ok := repo.jobsByQueue[queue]; ok { rv := make([]string, 0, len(jobs)) for _, job := range jobs { - if !repo.leasedJobs[job.Id] { - rv = append(rv, job.Id) + if !repo.leasedJobs[job.Id()] { + rv = append(rv, job.Id()) } } - return rv, nil + return rv } else { - return make([]string, 0), nil + return make([]string, 0) } } -func (repo *mockJobRepository) GetExistingJobsByIds(jobIds []string) ([]interfaces.LegacySchedulerJob, error) { - rv := make([]interfaces.LegacySchedulerJob, len(jobIds)) +func (repo *mockJobRepository) GetExistingJobsByIds(jobIds []string) []*jobdb.Job { + rv := make([]*jobdb.Job, len(jobIds)) for i, jobId := range jobIds { if job, ok := repo.jobsById[jobId]; ok { rv[i] = job } } - return rv, nil -} - -func (repo *mockJobRepository) TryLeaseJobs(clusterId string, queue string, jobs []*api.Job) ([]*api.Job, error) { - successfullyLeasedJobs := make([]*api.Job, 0, len(jobs)) - for _, job := range jobs { - if !repo.leasedJobs[job.Id] { - successfullyLeasedJobs = append(successfullyLeasedJobs, job) - repo.leasedJobs[job.Id] = true - } - } - return successfullyLeasedJobs, nil -} - -func apiJobFromPodSpec(queue string, podSpec *v1.PodSpec) *api.Job { - return &api.Job{ - Id: util.NewULID(), - PodSpec: podSpec, - Queue: queue, - } + return rv } -func podSpecFromPodRequirements(req *schedulerobjects.PodRequirements) *v1.PodSpec { - return &v1.PodSpec{ - NodeSelector: req.NodeSelector, - Affinity: req.Affinity, - Tolerations: req.Tolerations, - Priority: &req.Priority, - PreemptionPolicy: (*v1.PreemptionPolicy)(&req.PreemptionPolicy), - Containers: []v1.Container{ - { - Resources: req.ResourceRequirements, - }, - }, - } +func jobFromPodSpec(queue string, req *schedulerobjects.PodRequirements) *jobdb.Job { + return testfixtures.TestJob(queue, util.ULID(), "armada-default", req) } diff --git a/internal/scheduler/kubernetesobjects/affinity/affinity.go 
b/internal/scheduler/kubernetesobjects/affinity/affinity.go index 7f20812e246..7cb731bb29f 100644 --- a/internal/scheduler/kubernetesobjects/affinity/affinity.go +++ b/internal/scheduler/kubernetesobjects/affinity/affinity.go @@ -2,9 +2,8 @@ package affinity import ( "github.com/pkg/errors" + "golang.org/x/exp/slices" v1 "k8s.io/api/core/v1" - - "github.com/armadaproject/armada/internal/common/util" ) func AddNodeAntiAffinity(affinity *v1.Affinity, labelName string, labelValue string) error { @@ -51,7 +50,7 @@ func addAvoidNodeAffinityToNodeSelectorTerm(term *v1.NodeSelectorTerm, labelName mexp = &term.MatchExpressions[len(term.MatchExpressions)-1] } - if !util.ContainsString(mexp.Values, labelValue) { + if !slices.Contains(mexp.Values, labelValue) { mexp.Values = append(mexp.Values, labelValue) } } diff --git a/internal/scheduler/metrics.go b/internal/scheduler/metrics.go index 6e2e87b820b..a5eeeddc9a1 100644 --- a/internal/scheduler/metrics.go +++ b/internal/scheduler/metrics.go @@ -7,15 +7,16 @@ import ( "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" - "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" + armadamaps "github.com/armadaproject/armada/internal/common/maps" commonmetrics "github.com/armadaproject/armada/internal/common/metrics" "github.com/armadaproject/armada/internal/common/resource" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/jobdb" + "github.com/armadaproject/armada/internal/scheduler/queue" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -50,24 +51,24 @@ func (m metricProvider) GetRunningJobMetrics(queueName string) []*commonmetrics. 
// The metrics themselves are calculated asynchronously every refreshPeriod type MetricsCollector struct { jobDb *jobdb.JobDb - queueRepository repository.QueueRepository + queueCache queue.QueueCache executorRepository database.ExecutorRepository poolAssigner PoolAssigner refreshPeriod time.Duration - clock clock.Clock + clock clock.WithTicker state atomic.Value } func NewMetricsCollector( jobDb *jobdb.JobDb, - queueRepository repository.QueueRepository, + queueCache queue.QueueCache, executorRepository database.ExecutorRepository, poolAssigner PoolAssigner, refreshPeriod time.Duration, ) *MetricsCollector { return &MetricsCollector{ jobDb: jobDb, - queueRepository: queueRepository, + queueCache: queueCache, executorRepository: executorRepository, poolAssigner: poolAssigner, refreshPeriod: refreshPeriod, @@ -129,19 +130,22 @@ func (c *MetricsCollector) refresh(ctx *armadacontext.Context) error { } func (c *MetricsCollector) updateQueueMetrics(ctx *armadacontext.Context) ([]prometheus.Metric, error) { - queues, err := c.queueRepository.GetAllQueues(ctx) + queues, err := c.queueCache.GetAll(ctx) if err != nil { return nil, err } provider := metricProvider{queueStates: make(map[string]*queueState, len(queues))} queuedJobsCount := make(map[string]int, len(queues)) + schedulingKeysByQueue := make(map[string]map[schedulerobjects.SchedulingKey]bool, len(queues)) + for _, queue := range queues { provider.queueStates[queue.Name] = &queueState{ queuedJobRecorder: commonmetrics.NewJobMetricsRecorder(), runningJobRecorder: commonmetrics.NewJobMetricsRecorder(), } queuedJobsCount[queue.Name] = 0 + schedulingKeysByQueue[queue.Name] = map[schedulerobjects.SchedulingKey]bool{} } err = c.poolAssigner.Refresh(ctx) @@ -161,7 +165,7 @@ func (c *MetricsCollector) updateQueueMetrics(ctx *armadacontext.Context) ([]pro continue } - pool, err := c.poolAssigner.AssignPool(job) + pools, err := c.poolAssigner.AssignPools(job) if err != nil { return nil, err } @@ -186,6 +190,7 @@ func (c *MetricsCollector) updateQueueMetrics(ctx *armadacontext.Context) ([]pro recorder = qs.queuedJobRecorder timeInState = currentTime.Sub(time.Unix(0, job.Created())) queuedJobsCount[job.Queue()]++ + schedulingKeysByQueue[job.Queue()][job.SchedulingKey()] = true } else { run := job.LatestRun() if run == nil { @@ -200,11 +205,17 @@ func (c *MetricsCollector) updateQueueMetrics(ctx *armadacontext.Context) ([]pro recorder = qs.runningJobRecorder timeInState = currentTime.Sub(time.Unix(0, run.Created())) } - recorder.RecordJobRuntime(pool, priorityClass, timeInState) - recorder.RecordResources(pool, priorityClass, jobResources) + for _, pool := range pools { + recorder.RecordJobRuntime(pool, priorityClass, timeInState) + recorder.RecordResources(pool, priorityClass, jobResources) + } } - queueMetrics := commonmetrics.CollectQueueMetrics(queuedJobsCount, provider) + queuedDistinctSchedulingKeysCount := armadamaps.MapValues(schedulingKeysByQueue, func(schedulingKeys map[schedulerobjects.SchedulingKey]bool) int { + return len(schedulingKeys) + }) + + queueMetrics := commonmetrics.CollectQueueMetrics(queuedJobsCount, queuedDistinctSchedulingKeysCount, provider) return queueMetrics, nil } @@ -289,7 +300,7 @@ func (c *MetricsCollector) updateClusterMetrics(ctx *armadacontext.Context) ([]p cluster: executor.Id, pool: executor.Pool, queueName: job.Queue(), - priorityClass: job.GetPriorityClassName(), + priorityClass: job.PriorityClassName(), nodeType: node.ReportingNodeType, } addToResourceListMap(allocatedResourceByQueue, queueKey, 
schedulerobjects.ResourceListFromV1ResourceList(podRequirements.ResourceRequirements.Requests)) diff --git a/internal/scheduler/metrics/metrics.go b/internal/scheduler/metrics/metrics.go index d3a7b046db8..3952599eb97 100644 --- a/internal/scheduler/metrics/metrics.go +++ b/internal/scheduler/metrics/metrics.go @@ -62,6 +62,9 @@ type Metrics struct { // Messages that match no regex map to -1. matchedRegexIndexByErrorMessage *lru.Cache + // Histogram of completed run durations by queue + completedRunDurations *prometheus.HistogramVec + // Map from resource name to the counter and counterSeconds Vecs for that resource. resourceCounters map[v1.ResourceName]*prometheus.CounterVec } @@ -95,7 +98,20 @@ func New(config configuration.MetricsConfig) (*Metrics, error) { errorRegexes: errorRegexes, matchedRegexIndexByErrorMessage: matchedRegexIndexByError, - + completedRunDurations: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "job_run_completed_duration_seconds", + Help: "Time", + Buckets: prometheus.ExponentialBuckets( + 2, + 2, + 20), + ConstLabels: map[string]string{}, + }, + []string{"queue"}, + ), resourceCounters: make(map[v1.ResourceName]*prometheus.CounterVec), }, nil } @@ -129,6 +145,7 @@ func (m *Metrics) Describe(ch chan<- *prometheus.Desc) { for _, metric := range m.resourceCounters { metric.Describe(ch) } + m.completedRunDurations.Describe(ch) } // Collect and then reset all metrics. @@ -148,6 +165,7 @@ func (m *Metrics) Collect(ch chan<- prometheus.Metric) { } m.timeOfMostRecentReset = t } + m.completedRunDurations.Collect(ch) } func (m *Metrics) UpdateMany( @@ -279,7 +297,7 @@ func (m *Metrics) UpdateSucceeded(job *jobdb.Job) error { } func (m *Metrics) UpdateLeased(jctx *schedulercontext.JobSchedulingContext) error { - job := jctx.Job.(*jobdb.Job) + job := jctx.Job latestRun := job.LatestRun() duration, priorState := stateDuration(job, latestRun, &jctx.Created) labels := m.buffer[0:0] @@ -373,7 +391,7 @@ func (m *Metrics) indexOfFirstMatchingRegexFromErrorMessage(message string) (int func appendLabelsFromJob(labels []string, job *jobdb.Job) []string { executor, nodeName := executorAndNodeNameFromRun(job.LatestRun()) - labels = append(labels, job.GetQueue()) + labels = append(labels, job.Queue()) labels = append(labels, executor) labels = append(labels, "") // No nodeType. 
labels = append(labels, nodeName) @@ -381,9 +399,9 @@ func appendLabelsFromJob(labels []string, job *jobdb.Job) []string { } func appendLabelsFromJobSchedulingContext(labels []string, jctx *schedulercontext.JobSchedulingContext) []string { - job := jctx.Job.(*jobdb.Job) + job := jctx.Job executor, nodeName := executorAndNodeNameFromRun(job.LatestRun()) - labels = append(labels, job.GetQueue()) + labels = append(labels, job.Queue()) labels = append(labels, executor) wellKnownNodeType := "" if pctx := jctx.PodSchedulingContext; pctx != nil { @@ -441,7 +459,11 @@ func (m *Metrics) updateMetrics(labels []string, job *jobdb.Job, stateDuration t c.Add(stateDuration.Seconds()) } - requests := job.GetResourceRequirements().Requests + if job.HasRuns() && job.LatestRun().InTerminalState() { + m.completedRunDurations.WithLabelValues(job.Queue()).Observe(stateDuration.Seconds()) + } + + requests := job.ResourceRequirements().Requests for _, resource := range m.config.TrackedResourceNames { if r, ok := m.config.ResourceRenaming[resource]; ok { resource = v1.ResourceName(r) diff --git a/internal/scheduler/metrics_test.go b/internal/scheduler/metrics_test.go index 8afdb9fa9a2..53ad50e9c68 100644 --- a/internal/scheduler/metrics_test.go +++ b/internal/scheduler/metrics_test.go @@ -4,14 +4,12 @@ import ( "testing" "time" - "github.com/armadaproject/armada/pkg/client/queue" - "github.com/golang/mock/gomock" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/armadaproject/armada/internal/common/armadacontext" commonmetrics "github.com/armadaproject/armada/internal/common/metrics" @@ -19,6 +17,7 @@ import ( schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" + "github.com/armadaproject/armada/pkg/api" ) func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) { @@ -34,16 +33,17 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) { tests := map[string]struct { initialJobs []*jobdb.Job defaultPool string - poolMappings map[string]string - queues []queue.Queue + poolMappings map[string][]string + queues []*api.Queue expected []prometheus.Metric }{ "queued metrics": { initialJobs: queuedJobs, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, defaultPool: testfixtures.TestPool, expected: []prometheus.Metric{ commonmetrics.NewQueueSizeMetric(3.0, testfixtures.TestQueue), + commonmetrics.NewQueueDistinctSchedulingKeyMetric(1.0, testfixtures.TestQueue), commonmetrics.NewQueueDuration(3, 300, map[float64]uint64{60: 1, 600: 3, 1800: 3, 3600: 3, 10800: 3, 43200: 3, 86400: 3, 172800: 3, 604800: 3}, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue), @@ -64,10 +64,11 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) { }, "running metrics": { initialJobs: runningJobs, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, defaultPool: testfixtures.TestPool, expected: []prometheus.Metric{ commonmetrics.NewQueueSizeMetric(0.0, testfixtures.TestQueue), + commonmetrics.NewQueueDistinctSchedulingKeyMetric(0.0, testfixtures.TestQueue), commonmetrics.NewJobRunRunDuration(3, 300, 
map[float64]uint64{60: 1, 600: 3, 1800: 3, 3600: 3, 10800: 3, 43200: 3, 86400: 3, 172800: 3, 604800: 3}, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue), @@ -91,14 +92,14 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) { defer cancel() // set up job db with initial jobs - jobDb := testfixtures.NewJobDb() + jobDb := testfixtures.NewJobDb(testfixtures.TestResourceListFactory) txn := jobDb.WriteTxn() err := txn.Upsert(tc.initialJobs) require.NoError(t, err) txn.Commit() - queueRepository := schedulermocks.NewMockQueueRepository(ctrl) - queueRepository.EXPECT().GetAllQueues(ctx).Return(tc.queues, nil).Times(1) + queueCache := schedulermocks.NewMockQueueCache(ctrl) + queueCache.EXPECT().GetAll(ctx).Return(tc.queues, nil).Times(1) poolAssigner := &MockPoolAssigner{tc.defaultPool, tc.poolMappings} executorRepository := schedulermocks.NewMockExecutorRepository(ctrl) @@ -106,7 +107,7 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) { collector := NewMetricsCollector( jobDb, - queueRepository, + queueCache, executorRepository, poolAssigner, 2*time.Second, @@ -241,22 +242,22 @@ func TestMetricsCollector_TestCollect_ClusterMetrics(t *testing.T) { defer cancel() // set up job db with initial jobs - jobDb := testfixtures.NewJobDb() + jobDb := testfixtures.NewJobDb(testfixtures.TestResourceListFactory) txn := jobDb.WriteTxn() err := txn.Upsert(tc.jobDbJobs) require.NoError(t, err) txn.Commit() - queueRepository := schedulermocks.NewMockQueueRepository(ctrl) - queueRepository.EXPECT().GetAllQueues(ctx).Return([]queue.Queue{}, nil).Times(1) - poolAssigner := &MockPoolAssigner{testfixtures.TestPool, map[string]string{}} + queueCache := schedulermocks.NewMockQueueCache(ctrl) + queueCache.EXPECT().GetAll(ctx).Return([]*api.Queue{}, nil).Times(1) + poolAssigner := &MockPoolAssigner{testfixtures.TestPool, map[string][]string{}} executorRepository := schedulermocks.NewMockExecutorRepository(ctrl) executorRepository.EXPECT().GetExecutors(ctx).Return(tc.executors, nil) collector := NewMetricsCollector( jobDb, - queueRepository, + queueCache, executorRepository, poolAssigner, 2*time.Second, @@ -301,17 +302,17 @@ func createNode(nodeType string) *schedulerobjects.Node { type MockPoolAssigner struct { defaultPool string - poolsById map[string]string + poolsById map[string][]string } func (m MockPoolAssigner) Refresh(_ *armadacontext.Context) error { return nil } -func (m MockPoolAssigner) AssignPool(j *jobdb.Job) (string, error) { - pool, ok := m.poolsById[j.Id()] +func (m MockPoolAssigner) AssignPools(j *jobdb.Job) ([]string, error) { + pools, ok := m.poolsById[j.Id()] if !ok { - pool = m.defaultPool + return []string{m.defaultPool}, nil } - return pool, nil + return pools, nil } diff --git a/internal/scheduler/mocks/api.go b/internal/scheduler/mocks/api.go new file mode 100644 index 00000000000..7b846a53c83 --- /dev/null +++ b/internal/scheduler/mocks/api.go @@ -0,0 +1,422 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/armadaproject/armada/pkg/api (interfaces: SubmitClient,Submit_GetQueuesClient) + +// Package schedulermocks is a generated GoMock package. +package schedulermocks + +import ( + context "context" + reflect "reflect" + + api "github.com/armadaproject/armada/pkg/api" + types "github.com/gogo/protobuf/types" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" +) + +// MockSubmitClient is a mock of SubmitClient interface. 
+type MockSubmitClient struct { + ctrl *gomock.Controller + recorder *MockSubmitClientMockRecorder +} + +// MockSubmitClientMockRecorder is the mock recorder for MockSubmitClient. +type MockSubmitClientMockRecorder struct { + mock *MockSubmitClient +} + +// NewMockSubmitClient creates a new mock instance. +func NewMockSubmitClient(ctrl *gomock.Controller) *MockSubmitClient { + mock := &MockSubmitClient{ctrl: ctrl} + mock.recorder = &MockSubmitClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubmitClient) EXPECT() *MockSubmitClientMockRecorder { + return m.recorder +} + +// CancelJobSet mocks base method. +func (m *MockSubmitClient) CancelJobSet(arg0 context.Context, arg1 *api.JobSetCancelRequest, arg2 ...grpc.CallOption) (*types.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CancelJobSet", varargs...) + ret0, _ := ret[0].(*types.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelJobSet indicates an expected call of CancelJobSet. +func (mr *MockSubmitClientMockRecorder) CancelJobSet(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelJobSet", reflect.TypeOf((*MockSubmitClient)(nil).CancelJobSet), varargs...) +} + +// CancelJobs mocks base method. +func (m *MockSubmitClient) CancelJobs(arg0 context.Context, arg1 *api.JobCancelRequest, arg2 ...grpc.CallOption) (*api.CancellationResult, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CancelJobs", varargs...) + ret0, _ := ret[0].(*api.CancellationResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelJobs indicates an expected call of CancelJobs. +func (mr *MockSubmitClientMockRecorder) CancelJobs(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelJobs", reflect.TypeOf((*MockSubmitClient)(nil).CancelJobs), varargs...) +} + +// CreateQueue mocks base method. +func (m *MockSubmitClient) CreateQueue(arg0 context.Context, arg1 *api.Queue, arg2 ...grpc.CallOption) (*types.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateQueue", varargs...) + ret0, _ := ret[0].(*types.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateQueue indicates an expected call of CreateQueue. +func (mr *MockSubmitClientMockRecorder) CreateQueue(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateQueue", reflect.TypeOf((*MockSubmitClient)(nil).CreateQueue), varargs...) +} + +// CreateQueues mocks base method. +func (m *MockSubmitClient) CreateQueues(arg0 context.Context, arg1 *api.QueueList, arg2 ...grpc.CallOption) (*api.BatchQueueCreateResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateQueues", varargs...) 
+ ret0, _ := ret[0].(*api.BatchQueueCreateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateQueues indicates an expected call of CreateQueues. +func (mr *MockSubmitClientMockRecorder) CreateQueues(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateQueues", reflect.TypeOf((*MockSubmitClient)(nil).CreateQueues), varargs...) +} + +// DeleteQueue mocks base method. +func (m *MockSubmitClient) DeleteQueue(arg0 context.Context, arg1 *api.QueueDeleteRequest, arg2 ...grpc.CallOption) (*types.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteQueue", varargs...) + ret0, _ := ret[0].(*types.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteQueue indicates an expected call of DeleteQueue. +func (mr *MockSubmitClientMockRecorder) DeleteQueue(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteQueue", reflect.TypeOf((*MockSubmitClient)(nil).DeleteQueue), varargs...) +} + +// GetQueue mocks base method. +func (m *MockSubmitClient) GetQueue(arg0 context.Context, arg1 *api.QueueGetRequest, arg2 ...grpc.CallOption) (*api.Queue, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetQueue", varargs...) + ret0, _ := ret[0].(*api.Queue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetQueue indicates an expected call of GetQueue. +func (mr *MockSubmitClientMockRecorder) GetQueue(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueue", reflect.TypeOf((*MockSubmitClient)(nil).GetQueue), varargs...) +} + +// GetQueues mocks base method. +func (m *MockSubmitClient) GetQueues(arg0 context.Context, arg1 *api.StreamingQueueGetRequest, arg2 ...grpc.CallOption) (api.Submit_GetQueuesClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetQueues", varargs...) + ret0, _ := ret[0].(api.Submit_GetQueuesClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetQueues indicates an expected call of GetQueues. +func (mr *MockSubmitClientMockRecorder) GetQueues(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueues", reflect.TypeOf((*MockSubmitClient)(nil).GetQueues), varargs...) +} + +// Health mocks base method. +func (m *MockSubmitClient) Health(arg0 context.Context, arg1 *types.Empty, arg2 ...grpc.CallOption) (*api.HealthCheckResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Health", varargs...) + ret0, _ := ret[0].(*api.HealthCheckResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Health indicates an expected call of Health. 
+func (mr *MockSubmitClientMockRecorder) Health(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Health", reflect.TypeOf((*MockSubmitClient)(nil).Health), varargs...) +} + +// PreemptJobs mocks base method. +func (m *MockSubmitClient) PreemptJobs(arg0 context.Context, arg1 *api.JobPreemptRequest, arg2 ...grpc.CallOption) (*types.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PreemptJobs", varargs...) + ret0, _ := ret[0].(*types.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PreemptJobs indicates an expected call of PreemptJobs. +func (mr *MockSubmitClientMockRecorder) PreemptJobs(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreemptJobs", reflect.TypeOf((*MockSubmitClient)(nil).PreemptJobs), varargs...) +} + +// ReprioritizeJobs mocks base method. +func (m *MockSubmitClient) ReprioritizeJobs(arg0 context.Context, arg1 *api.JobReprioritizeRequest, arg2 ...grpc.CallOption) (*api.JobReprioritizeResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReprioritizeJobs", varargs...) + ret0, _ := ret[0].(*api.JobReprioritizeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReprioritizeJobs indicates an expected call of ReprioritizeJobs. +func (mr *MockSubmitClientMockRecorder) ReprioritizeJobs(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReprioritizeJobs", reflect.TypeOf((*MockSubmitClient)(nil).ReprioritizeJobs), varargs...) +} + +// SubmitJobs mocks base method. +func (m *MockSubmitClient) SubmitJobs(arg0 context.Context, arg1 *api.JobSubmitRequest, arg2 ...grpc.CallOption) (*api.JobSubmitResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SubmitJobs", varargs...) + ret0, _ := ret[0].(*api.JobSubmitResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitJobs indicates an expected call of SubmitJobs. +func (mr *MockSubmitClientMockRecorder) SubmitJobs(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitJobs", reflect.TypeOf((*MockSubmitClient)(nil).SubmitJobs), varargs...) +} + +// UpdateQueue mocks base method. +func (m *MockSubmitClient) UpdateQueue(arg0 context.Context, arg1 *api.Queue, arg2 ...grpc.CallOption) (*types.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateQueue", varargs...) + ret0, _ := ret[0].(*types.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateQueue indicates an expected call of UpdateQueue. 
+func (mr *MockSubmitClientMockRecorder) UpdateQueue(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateQueue", reflect.TypeOf((*MockSubmitClient)(nil).UpdateQueue), varargs...) +} + +// UpdateQueues mocks base method. +func (m *MockSubmitClient) UpdateQueues(arg0 context.Context, arg1 *api.QueueList, arg2 ...grpc.CallOption) (*api.BatchQueueUpdateResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateQueues", varargs...) + ret0, _ := ret[0].(*api.BatchQueueUpdateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateQueues indicates an expected call of UpdateQueues. +func (mr *MockSubmitClientMockRecorder) UpdateQueues(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateQueues", reflect.TypeOf((*MockSubmitClient)(nil).UpdateQueues), varargs...) +} + +// MockSubmit_GetQueuesClient is a mock of Submit_GetQueuesClient interface. +type MockSubmit_GetQueuesClient struct { + ctrl *gomock.Controller + recorder *MockSubmit_GetQueuesClientMockRecorder +} + +// MockSubmit_GetQueuesClientMockRecorder is the mock recorder for MockSubmit_GetQueuesClient. +type MockSubmit_GetQueuesClientMockRecorder struct { + mock *MockSubmit_GetQueuesClient +} + +// NewMockSubmit_GetQueuesClient creates a new mock instance. +func NewMockSubmit_GetQueuesClient(ctrl *gomock.Controller) *MockSubmit_GetQueuesClient { + mock := &MockSubmit_GetQueuesClient{ctrl: ctrl} + mock.recorder = &MockSubmit_GetQueuesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubmit_GetQueuesClient) EXPECT() *MockSubmit_GetQueuesClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockSubmit_GetQueuesClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockSubmit_GetQueuesClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockSubmit_GetQueuesClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockSubmit_GetQueuesClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockSubmit_GetQueuesClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. 
+func (mr *MockSubmit_GetQueuesClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockSubmit_GetQueuesClient) Recv() (*api.StreamingQueueMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*api.StreamingQueueMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockSubmit_GetQueuesClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m *MockSubmit_GetQueuesClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockSubmit_GetQueuesClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method. +func (m *MockSubmit_GetQueuesClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockSubmit_GetQueuesClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method. +func (m *MockSubmit_GetQueuesClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. 
+func (mr *MockSubmit_GetQueuesClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockSubmit_GetQueuesClient)(nil).Trailer)) +} diff --git a/internal/scheduler/mocks/generate.go b/internal/scheduler/mocks/generate.go index f6e54d48b9b..06e5544c852 100644 --- a/internal/scheduler/mocks/generate.go +++ b/internal/scheduler/mocks/generate.go @@ -4,5 +4,6 @@ package schedulermocks //go:generate mockgen -destination=./leases_getter.go -package=schedulermocks "k8s.io/client-go/kubernetes/typed/coordination/v1" LeasesGetter,LeaseInterface //go:generate mockgen -destination=./job_repository.go -package=schedulermocks "github.com/armadaproject/armada/internal/scheduler/database" JobRepository //go:generate mockgen -destination=./executor_repository.go -package=schedulermocks "github.com/armadaproject/armada/internal/scheduler/database" ExecutorRepository -//go:generate mockgen -destination=./queue_repository.go -package=schedulermocks "github.com/armadaproject/armada/internal/armada/repository" QueueRepository //go:generate mockgen -destination=./grpc.go -package=schedulermocks "github.com/armadaproject/armada/pkg/executorapi" ExecutorApi_LeaseJobRunsServer +//go:generate mockgen -destination=./queue_cache.go -package=schedulermocks "github.com/armadaproject/armada/internal/scheduler/queue" QueueCache +//go:generate mockgen -destination=./api.go -package=schedulermocks "github.com/armadaproject/armada/pkg/api" SubmitClient,Submit_GetQueuesClient diff --git a/internal/scheduler/mocks/queue_cache.go b/internal/scheduler/mocks/queue_cache.go new file mode 100644 index 00000000000..1970a5fa010 --- /dev/null +++ b/internal/scheduler/mocks/queue_cache.go @@ -0,0 +1,51 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/armadaproject/armada/internal/scheduler/queue (interfaces: QueueCache) + +// Package schedulermocks is a generated GoMock package. +package schedulermocks + +import ( + reflect "reflect" + + armadacontext "github.com/armadaproject/armada/internal/common/armadacontext" + api "github.com/armadaproject/armada/pkg/api" + gomock "github.com/golang/mock/gomock" +) + +// MockQueueCache is a mock of QueueCache interface. +type MockQueueCache struct { + ctrl *gomock.Controller + recorder *MockQueueCacheMockRecorder +} + +// MockQueueCacheMockRecorder is the mock recorder for MockQueueCache. +type MockQueueCacheMockRecorder struct { + mock *MockQueueCache +} + +// NewMockQueueCache creates a new mock instance. +func NewMockQueueCache(ctrl *gomock.Controller) *MockQueueCache { + mock := &MockQueueCache{ctrl: ctrl} + mock.recorder = &MockQueueCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueueCache) EXPECT() *MockQueueCacheMockRecorder { + return m.recorder +} + +// GetAll mocks base method. +func (m *MockQueueCache) GetAll(arg0 *armadacontext.Context) ([]*api.Queue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAll", arg0) + ret0, _ := ret[0].([]*api.Queue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAll indicates an expected call of GetAll. 
+func (mr *MockQueueCacheMockRecorder) GetAll(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockQueueCache)(nil).GetAll), arg0) +} diff --git a/internal/scheduler/nodedb/encoding.go b/internal/scheduler/nodedb/encoding.go index b35b2db9bf9..2991d7c8b9c 100644 --- a/internal/scheduler/nodedb/encoding.go +++ b/internal/scheduler/nodedb/encoding.go @@ -3,9 +3,7 @@ package nodedb import ( "encoding/binary" - "k8s.io/apimachinery/pkg/api/resource" - - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" ) // NodeIndexKey returns a []byte to be used as a key with the NodeIndex memdb index. @@ -21,10 +19,10 @@ import ( // // The key layout is such that an index ordered first by the nodeTypeId, then resources[0], and so on. // The byte representation is appended to out, which is returned. -func NodeIndexKey(out []byte, nodeTypeId uint64, resources []resource.Quantity) []byte { +func NodeIndexKey(out []byte, nodeTypeId uint64, resources []int64) []byte { out = EncodeUint64(out, nodeTypeId) for _, q := range resources { - out = EncodeQuantity(out, q) + out = EncodeInt64(out, q) } // Because the key returned by this function should be used with a lower-bound operation on allocatable resources // we set the nodeIndex to 0. @@ -33,39 +31,30 @@ func NodeIndexKey(out []byte, nodeTypeId uint64, resources []resource.Quantity) } // RoundedNodeIndexKeyFromResourceList works like NodeIndexKey, except that prior to constructing the key -// the i-th resource is rounded down to the closest multiple of resourceResolutionMillis[i]. +// the i-th resource is rounded down to the closest multiple of resourceResolution[i]. // This rounding makes iterating over nodes with at least some amount of available resources more efficient. // It also takes as arguments a list of resource names and a resourceList, instead of a list of resources. func RoundedNodeIndexKeyFromResourceList( out []byte, nodeTypeId uint64, resourceNames []string, - resourceResolutionMillis []int64, - rl schedulerobjects.ResourceList, + resourceResolution []int64, + rl internaltypes.ResourceList, nodeIndex uint64, ) []byte { out = EncodeUint64(out, nodeTypeId) for i, name := range resourceNames { - resolution := resourceResolutionMillis[i] - q := rl.Get(name) + resolution := resourceResolution[i] + q := rl.GetByNameZeroIfMissing(name) q = roundQuantityToResolution(q, resolution) - out = EncodeQuantity(out, q) + out = EncodeInt64(out, q) } out = EncodeUint64(out, nodeIndex) return out } -func roundQuantityToResolution(q resource.Quantity, resolutionMillis int64) resource.Quantity { - q.SetMilli((q.MilliValue() / resolutionMillis) * resolutionMillis) - return q -} - -// EncodeQuantity returns the canonical byte representation of a resource.Quantity used within the nodeDb. -// The resulting []byte is such that for two resource.Quantity a and b, a.Cmp(b) = bytes.Compare(enc(a), enc(b)). -// The byte representation is appended to out, which is returned. -func EncodeQuantity(out []byte, val resource.Quantity) []byte { - // We assume that any quantity we want to compare can be represented as an int64. - return EncodeInt64(out, val.MilliValue()) +func roundQuantityToResolution(q int64, resolution int64) int64 { + return (q / resolution) * resolution } // EncodeInt64 returns the canonical byte representation of an int64 used within the nodeDb. 
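Aside: the encoding.go hunk above replaces the resource.Quantity-based key encoding with plain int64s in each resource's native scale (the per-resource resolutions are produced elsewhere in this change by makeIndexedResourceResolution; per the struct comment, an indexed cpu resolution of 1 against a supported cpu resolution of 1m becomes 1000). The sketch below is a minimal, standalone illustration of the two properties the new keys rely on: rounding values down to the configured resolution, and an order-preserving byte encoding so that bytes.Compare on keys agrees with numeric comparison, which is what makes memdb lower-bound seeks over allocatable resources valid. The sign-bit-flip big-endian layout in encodeInt64 is an assumption consistent with the expected key bytes in the updated tests, not a copy of the real EncodeInt64, and indexKey is only an approximation of NodeIndexKey.

```go
// A self-contained sketch (not the production code) of the two properties the
// int64-based node index keys rely on.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// roundToResolution mirrors roundQuantityToResolution: round q down to the
// closest multiple of resolution, both in the resource's native scale.
func roundToResolution(q, resolution int64) int64 {
	return (q / resolution) * resolution
}

// appendUint64 appends the big-endian bytes of v to out.
func appendUint64(out []byte, v uint64) []byte {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], v)
	return append(out, buf[:]...)
}

// encodeInt64 appends an order-preserving 8-byte encoding of v: flipping the
// sign bit makes negative values sort before positive ones when the bytes are
// compared as unsigned big-endian. This layout is assumed, not copied from
// EncodeInt64.
func encodeInt64(out []byte, v int64) []byte {
	return appendUint64(out, uint64(v)^(1<<63))
}

// indexKey approximates NodeIndexKey: nodeTypeId, then one rounded value per
// indexed resource, then the node index.
func indexKey(nodeTypeId uint64, resources, resolutions []int64, nodeIndex uint64) []byte {
	out := make([]byte, 0, 8*(len(resources)+2))
	out = appendUint64(out, nodeTypeId)
	for i, q := range resources {
		out = encodeInt64(out, roundToResolution(q, resolutions[i]))
	}
	return appendUint64(out, nodeIndex)
}

func main() {
	// 2100 and 2900 mCPU both round down to 2000 at a resolution of 1000.
	fmt.Println(roundToResolution(2100, 1000), roundToResolution(2900, 1000)) // 2000 2000

	// Keys compare the same way the underlying quantities do, so a memdb
	// lower-bound seek over allocatable resources can work on raw bytes.
	small := indexKey(0, []int64{1000, 8_000_000_000}, []int64{1000, 1000}, 0)
	large := indexKey(0, []int64{2000, 8_000_000_000}, []int64{1000, 1000}, 0)
	fmt.Println(bytes.Compare(small, large)) // -1
}
```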
diff --git a/internal/scheduler/nodedb/encoding_test.go b/internal/scheduler/nodedb/encoding_test.go index c6e3a53e7c9..43cac336dfb 100644 --- a/internal/scheduler/nodedb/encoding_test.go +++ b/internal/scheduler/nodedb/encoding_test.go @@ -4,157 +4,48 @@ import ( "bytes" "testing" + "github.com/armadaproject/armada/internal/scheduler/testfixtures" + "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/resource" - - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -// The memdb internally uses bytes.Compare to compare keys. -// Here, we test that byte representation comparison of quantities works as expected. -func TestEncodeQuantity(t *testing.T) { - tests := map[string]struct { - A resource.Quantity - B resource.Quantity - }{ - "10Mi 10Mi": { - A: resource.MustParse("10Mi"), - B: resource.MustParse("10Mi"), - }, - "5Mi 10Mi": { - A: resource.MustParse("5Mi"), - B: resource.MustParse("10Mi"), - }, - "10Gi 10Gi": { - A: resource.MustParse("10Gi"), - B: resource.MustParse("10Gi"), - }, - "5Gi 10Gi": { - A: resource.MustParse("5Gi"), - B: resource.MustParse("10Gi"), - }, - "1 1": { - A: resource.MustParse("1"), - B: resource.MustParse("1"), - }, - "1 2": { - A: resource.MustParse("1"), - B: resource.MustParse("2"), - }, - "-1 1": { - A: resource.MustParse("-1"), - B: resource.MustParse("1"), - }, - "100m 100m": { - A: resource.MustParse("100M"), - B: resource.MustParse("100M"), - }, - "100m 200m": { - A: resource.MustParse("100M"), - B: resource.MustParse("200M"), - }, - "54870m 54871m": { - A: resource.MustParse("54870m"), - B: resource.MustParse("54871m"), - }, - "1000Ti 1001Ti": { - A: resource.MustParse("1000Ti"), - B: resource.MustParse("1001Ti"), - }, - "1000Pi 1001Pi": { - A: resource.MustParse("1000Pi"), - B: resource.MustParse("1001Pi"), - }, - "1 1001m": { - A: resource.MustParse("1"), - B: resource.MustParse("1001m"), - }, - "1 1000m": { - A: resource.MustParse("1"), - B: resource.MustParse("1000m"), - }, - "1Gi 1001Mi": { - A: resource.MustParse("1Gi"), - B: resource.MustParse("1001Mi"), - }, - "1Gi 1000Mi": { - A: resource.MustParse("1Gi"), - B: resource.MustParse("1000Mi"), - }, - "5188205838208Ki 5188205838209Ki": { - A: resource.MustParse("5188205838208Ki"), - B: resource.MustParse("5188205838209Ki"), - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - expected := tc.A.Cmp(tc.B) - actual := bytes.Compare(EncodeQuantity(nil, tc.A), EncodeQuantity(nil, tc.B)) - assert.Equal(t, expected, actual) - - expected = tc.B.Cmp(tc.A) - actual = bytes.Compare(EncodeQuantity(nil, tc.B), EncodeQuantity(nil, tc.A)) - assert.Equal(t, expected, actual) - }) - } -} - func TestRoundQuantityToResolution(t *testing.T) { tests := map[string]struct { - q resource.Quantity - resolutionMillis int64 - expected resource.Quantity + q int64 + resolution int64 + expected int64 }{ - "1Ki": { - q: resource.MustParse("1Ki"), - resolutionMillis: 1, - expected: resource.MustParse("1Ki"), - }, "resolution equal to quantity": { - q: resource.MustParse("1Ki"), - resolutionMillis: 1024 * 1000, - expected: resource.MustParse("1Ki"), + q: 1024, + resolution: 1024, + expected: 1024, }, - "0": { - q: resource.MustParse("0"), - resolutionMillis: 1, - expected: resource.MustParse("0"), - }, - "1m": { - q: resource.MustParse("1m"), - resolutionMillis: 1, - expected: resource.MustParse("1m"), + "just above cutoff": { + q: 2001, + resolution: 1000, + expected: 2000, }, - "1": { - q: resource.MustParse("1"), - resolutionMillis: 1, - expected: 
resource.MustParse("1"), + "just below cutoff": { + q: 2999, + resolution: 1000, + expected: 2000, }, - "resolution 3": { - q: resource.MustParse("1"), - resolutionMillis: 3, - expected: resource.MustParse("999m"), + "0": { + q: 0, + resolution: 1000, + expected: 0, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - qc := tc.q.DeepCopy() - actual := roundQuantityToResolution(tc.q, tc.resolutionMillis) - assert.True(t, qc.Equal(tc.q)) - assert.Truef(t, actual.Equal(tc.expected), "expected %s, but got %s", tc.expected.String(), actual.String()) - - qDec := tc.q.DeepCopy() - qDec.ToDec() - qDecCopy := qDec.DeepCopy() - actualDec := roundQuantityToResolution(qDec, tc.resolutionMillis) - assert.True(t, qDecCopy.Equal(qDec)) - assert.Truef(t, actualDec.Equal(tc.expected), "expected %s, but got %s", tc.expected.String(), actual.String()) + actual := roundQuantityToResolution(tc.q, tc.resolution) + assert.Equal(t, tc.expected, actual, name) }) } } func TestNodeIndexKeyComparison(t *testing.T) { - v1 := resource.MustParse("1") actualRoundedKey := RoundedNodeIndexKeyFromResourceList( nil, 0, @@ -162,50 +53,32 @@ func TestNodeIndexKeyComparison(t *testing.T) { "cpu", "memory", "nvidia.com/gpu", - "nvidia.com/mig-1g.10gb", - "nvidia.com/mig-1g.20gb", - "nvidia.com/mig-1g.40gb", }, []int64{ - v1.MilliValue(), - v1.MilliValue(), - v1.MilliValue(), - v1.MilliValue(), - v1.MilliValue(), - v1.MilliValue(), - }, - schedulerobjects.ResourceList{ - Resources: map[string]resource.Quantity{ - "cpu": *resource.NewScaledQuantity(999958006, -9), - "memory": *resource.NewScaledQuantity(11823681536, 0), - "nvidia.com/gpu": *resource.NewScaledQuantity(0, 0), - "nvidia.com/mig-1g.10gb": *resource.NewScaledQuantity(0, 0), - "nvidia.com/mig-1g.20gb": *resource.NewScaledQuantity(0, 0), - "nvidia.com/mig-1g.40gb": *resource.NewScaledQuantity(0, 0), - }, - }, + 1000, + 1000, + 1000, + }, + testfixtures.TestResourceListFactory.FromJobResourceListIgnoreUnknown( + map[string]resource.Quantity{ + "cpu": *resource.NewScaledQuantity(999958006, -9), + "memory": *resource.NewScaledQuantity(11823681536, 0), + "nvidia.com/gpu": *resource.NewScaledQuantity(0, 0), + }), 0, ) + actualKey := NodeIndexKey( nil, 0, - []resource.Quantity{ - *resource.NewScaledQuantity(999958006, -9), - *resource.NewScaledQuantity(11823681536, 0), - *resource.NewScaledQuantity(0, 0), - *resource.NewScaledQuantity(0, 0), - *resource.NewScaledQuantity(0, 0), - *resource.NewScaledQuantity(0, 0), - }, + []int64{1000, 11823681000, 0}, ) + expected := []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nodeTypeId 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, // cpu - 0x80, 0x00, 0x0a, 0xc0, 0xea, 0x56, 0x80, 0x00, // memory + 0x80, 0x00, 0x00, 0x02, 0xc0, 0xbf, 0x0d, 0xe8, // memory 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com.gpu - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com/mig-1g.10gb - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com/mig-1g.20gb - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com/mig-1g.40gb 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nodeIndex } assert.Equal(t, expected, actualRoundedKey) @@ -215,7 +88,7 @@ func TestNodeIndexKeyComparison(t *testing.T) { func TestNodeIndexKey(t *testing.T) { type nodeIndexKeyValues struct { nodeTypeId uint64 - resources []resource.Quantity + resources []int64 } tests := map[string]struct { a nodeIndexKeyValues @@ -240,41 +113,41 @@ func TestNodeIndexKey(t *testing.T) { "equal nodeTypeId and resources": { a: 
nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}, + resources: []int64{1, 2}, }, b: nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}, + resources: []int64{1, 2}, }, }, "equal nodeTypeId and unequal resources": { a: nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("2"), resource.MustParse("1")}, + resources: []int64{2, 1}, }, b: nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}, + resources: []int64{1, 2}, }, }, "unequal nodeTypeId and equal resources": { a: nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}, + resources: []int64{1, 2}, }, b: nodeIndexKeyValues{ nodeTypeId: 11, - resources: []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}, + resources: []int64{1, 2}, }, }, "negative resource": { a: nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}, + resources: []int64{1, 2}, }, b: nodeIndexKeyValues{ nodeTypeId: 10, - resources: []resource.Quantity{resource.MustParse("-1"), resource.MustParse("2")}, + resources: []int64{-1, 2}, }, }, } @@ -286,8 +159,11 @@ func TestNodeIndexKey(t *testing.T) { } for i, qa := range a.resources { qb := b.resources[i] - if cmp := qa.Cmp(qb); cmp != 0 { - return cmp + if qa > qb { + return 1 + } + if qb > qa { + return -1 } } return 0 @@ -309,40 +185,26 @@ func TestNodeIndexKey(t *testing.T) { func TestRoundedNodeIndexKeyFromResourceList(t *testing.T) { assert.Equal( t, - NodeIndexKey(nil, 0, []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}), + NodeIndexKey(nil, 0, []int64{1, 2000}), RoundedNodeIndexKeyFromResourceList( nil, 0, - []string{"foo", "bar"}, - []int64{1000, 2000}, - schedulerobjects.ResourceList{ - Resources: map[string]resource.Quantity{"foo": resource.MustParse("1"), "bar": resource.MustParse("2")}, - }, + []string{"memory", "cpu"}, + []int64{1, 2000}, + testfixtures.TestResourceListFactory.FromNodeProto(map[string]resource.Quantity{"memory": resource.MustParse("1"), "cpu": resource.MustParse("2")}), 0, ), ) - assert.NotEqual( + assert.Equal( t, - NodeIndexKey(nil, 0, []resource.Quantity{resource.MustParse("1"), resource.MustParse("2")}), + NodeIndexKey(nil, 0, []int64{1, 1500}), RoundedNodeIndexKeyFromResourceList( nil, 0, - []string{"foo", "bar"}, - []int64{1000, 1500}, - schedulerobjects.ResourceList{ - Resources: map[string]resource.Quantity{"foo": resource.MustParse("1"), "bar": resource.MustParse("2")}, - }, + []string{"memory", "cpu"}, + []int64{1, 1500}, + testfixtures.TestResourceListFactory.FromNodeProto(map[string]resource.Quantity{"memory": resource.MustParse("1"), "cpu": resource.MustParse("2")}), 0, ), ) } - -func BenchmarkEncodeQuantityBuffer(b *testing.B) { - out := make([]byte, 8) - q := resource.MustParse("16Gi") - b.ResetTimer() - for n := 0; n < b.N; n++ { - out = out[0:0] - EncodeQuantity(out, q) - } -} diff --git a/internal/scheduler/nodedb/nodedb.go b/internal/scheduler/nodedb/nodedb.go index a02dacf6172..1259c930933 100644 --- a/internal/scheduler/nodedb/nodedb.go +++ b/internal/scheduler/nodedb/nodedb.go @@ -14,19 +14,16 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" 
"github.com/armadaproject/armada/internal/common/armadaerrors" + "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/common/util" - schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" koTaint "github.com/armadaproject/armada/internal/scheduler/kubernetesobjects/taint" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - "github.com/armadaproject/armada/pkg/api" ) const ( @@ -46,7 +43,7 @@ func (nodeDb *NodeDb) create(node *schedulerobjects.Node) (*internaltypes.Node, } labels := nodeDb.copyMapWithIntern(node.GetLabels()) - labels[schedulerconfig.NodeIdLabel] = node.Id + labels[configuration.NodeIdLabel] = node.Id totalResources := node.TotalResources @@ -57,27 +54,18 @@ func (nodeDb *NodeDb) create(node *schedulerobjects.Node) (*internaltypes.Node, nodeDb.indexedNodeLabels, ) - allocatableByPriority := schedulerobjects.AllocatableByPriorityAndResourceType(node.AllocatableByPriorityAndResource).DeepCopy() + allocatableByPriority := map[int32]internaltypes.ResourceList{} minimumPriority := int32(math.MaxInt32) - for p := range allocatableByPriority { + for p, rl := range node.AllocatableByPriorityAndResource { if p < minimumPriority { minimumPriority = p } + allocatableByPriority[p] = nodeDb.resourceListFactory.FromNodeProto(rl.Resources) } if minimumPriority < 0 { return nil, errors.Errorf("found negative priority %d on node %s; negative priorities are reserved for internal use", minimumPriority, node.Id) } - allocatableByPriority[evictedPriority] = allocatableByPriority[minimumPriority].DeepCopy() - - allocatedByQueue := node.AllocatedByQueue - if allocatedByQueue == nil { - allocatedByQueue = make(map[string]schedulerobjects.ResourceList) - } - - allocatedByJobId := node.AllocatedByJobId - if allocatedByJobId == nil { - allocatedByJobId = make(map[string]schedulerobjects.ResourceList) - } + allocatableByPriority[evictedPriority] = allocatableByPriority[minimumPriority] evictedJobRunIds := node.EvictedJobRunIds if evictedJobRunIds == nil { @@ -105,10 +93,10 @@ func (nodeDb *NodeDb) create(node *schedulerobjects.Node) (*internaltypes.Node, node.Name, taints, labels, - totalResources, + nodeDb.resourceListFactory.FromNodeProto(totalResources.Resources), allocatableByPriority, - allocatedByQueue, - allocatedByJobId, + fromMapKToJobResourcesIgnoreUnknown(nodeDb.resourceListFactory, node.AllocatedByQueue), + fromMapKToJobResourcesIgnoreUnknown(nodeDb.resourceListFactory, node.AllocatedByJobId), evictedJobRunIds, nil), nil } @@ -121,25 +109,13 @@ func (nodeDb *NodeDb) copyMapWithIntern(labels map[string]string) map[string]str return result } -func (nodeDb *NodeDb) CreateAndInsertWithApiJobsWithTxn(txn *memdb.Txn, jobs []*api.Job, node *schedulerobjects.Node) error { - entry, err := nodeDb.create(node) - if err != nil { - return err +// Ignore unknown resources, round up. 
+func fromMapKToJobResourcesIgnoreUnknown[K comparable](factory *internaltypes.ResourceListFactory, m map[K]schedulerobjects.ResourceList) map[K]internaltypes.ResourceList { + result := make(map[K]internaltypes.ResourceList, len(m)) + for k, v := range m { + result[k] = factory.FromJobResourceListIgnoreUnknown(v.Resources) } - for _, job := range jobs { - priority, ok := job.GetScheduledAtPriority() - if !ok { - priorityClass := interfaces.PriorityClassFromLegacySchedulerJob(nodeDb.priorityClasses, nodeDb.defaultPriorityClass, job) - priority = priorityClass.Priority - } - if err := nodeDb.bindJobToNodeInPlace(entry, job, priority); err != nil { - return err - } - } - if err := nodeDb.UpsertWithTxn(txn, entry); err != nil { - return err - } - return nil + return result } func (nodeDb *NodeDb) CreateAndInsertWithJobDbJobsWithTxn(txn *memdb.Txn, jobs []*jobdb.Job, node *schedulerobjects.Node) error { @@ -148,9 +124,9 @@ func (nodeDb *NodeDb) CreateAndInsertWithJobDbJobsWithTxn(txn *memdb.Txn, jobs [ return err } for _, job := range jobs { - priority, ok := job.GetScheduledAtPriority() + priority, ok := job.ScheduledAtPriority() if !ok { - priorityClass := interfaces.PriorityClassFromLegacySchedulerJob(nodeDb.priorityClasses, nodeDb.defaultPriorityClass, job) + priorityClass := job.PriorityClass() priority = priorityClass.Priority } if err := nodeDb.bindJobToNodeInPlace(entry, job, priority); err != nil { @@ -181,22 +157,10 @@ type EvictedJobSchedulingContext struct { type NodeDb struct { // In-memory database storing *Node. db *memdb.MemDB - // Once a node has been found on which a pod can be scheduled, - // the NodeDb will consider up to the next maxExtraNodesToConsider nodes. - // The NodeDb selects the node with the best score out of the considered nodes. - // In particular, the score expresses whether preemption is necessary to schedule a pod. - // Hence, a larger maxExtraNodesToConsider would reduce the expected number of preemptions. - // - // TODO: Currently gives no benefit. Since all nodes are given the same score. - maxExtraNodesToConsider uint // Allowed priority classes. // Because the number of database indices scales linearly with the number of distinct priorities, // the efficiency of the NodeDb relies on the number of distinct priorities being small. priorityClasses map[string]types.PriorityClass - // defaultPriorityClass is the name of the default priority class; it is - // used for jobs that specify a priority class that does not appear in - // priorityClasses, for example because it was deleted. - defaultPriorityClass string // Priority class priorities and NodeDb-internal priority, in increasing order. nodeDbPriorities []int32 // Resources, e.g., "cpu", "memory", and "nvidia.com/gpu", @@ -204,12 +168,18 @@ type NodeDb struct { indexedResources []string // Like indexedResources, but stored as a map for efficient lookup. indexedResourcesSet map[string]interface{} - // The resolution with which indexed resources are tracked. In the same order as indexedResources. - // For example, if indexedResources = []string{"cpu"} and indexedResourceResolutionMillis = []int64{1000}, - // then nodes with, e.g., 2000, 2100, and 2900 mCPU allocatable are all registered as having 2000 mCPU allocatable. + // The resolution with which indexed resources are tracked. + // In the same order as indexedResources above. + // In the same units as the supportedResourceType. + // + // For example if + // - there is one indexedResource called cpu, with resolution 1. 
+ // - supportedResourceType cpu has resolution 1m. + // then indexedResourceResolution will be []int64{1000}, + // then nodes with, e.g., 2000, 2100, and 2900 mCPU allocatable will be all registered as having 2000 mCPU allocatable. // // Lower resolution makes scheduling faster, but may lead to jobs incorrectly being considered unschedulable. - indexedResourceResolutionMillis []int64 + indexedResourceResolution []int64 // Map from priority class priority to the database index tracking allocatable resources at that priority. indexNameByPriority map[int32]string // Map from priority class priority to the index of node.keys corresponding to that priority. @@ -261,21 +231,23 @@ type NodeDb struct { scheduledAtPriorityByJobId map[string]int32 stringInterner *stringinterner.StringInterner + + resourceListFactory *internaltypes.ResourceListFactory } func NewNodeDb( priorityClasses map[string]types.PriorityClass, - maxExtraNodesToConsider uint, - indexedResources []configuration.IndexedResource, + indexedResources []configuration.ResourceType, indexedTaints []string, indexedNodeLabels []string, wellKnownNodeTypes []configuration.WellKnownNodeType, stringInterner *stringinterner.StringInterner, + resourceListFactory *internaltypes.ResourceListFactory, ) (*NodeDb, error) { nodeDbPriorities := []int32{evictedPriority} nodeDbPriorities = append(nodeDbPriorities, types.AllowedPriorities(priorityClasses)...) - indexedResourceNames := util.Map(indexedResources, func(v configuration.IndexedResource) string { return v.Name }) + indexedResourceNames := slices.Map(indexedResources, func(v configuration.ResourceType) string { return v.Name }) schema, indexNameByPriority, keyIndexByPriority := nodeDbSchema(nodeDbPriorities, indexedResourceNames) db, err := memdb.NewMemDB(schema) if err != nil { @@ -307,31 +279,34 @@ func NewNodeDb( } return rv } + + indexedResourceResolution, err := makeIndexedResourceResolution(indexedResources, resourceListFactory) + if err != nil { + return nil, err + } + nodeDb := NodeDb{ - priorityClasses: priorityClasses, - nodeDbPriorities: nodeDbPriorities, - maxExtraNodesToConsider: maxExtraNodesToConsider, - indexedResources: indexedResourceNames, - indexedResourcesSet: mapFromSlice(indexedResourceNames), - indexedResourceResolutionMillis: util.Map( - indexedResources, - func(v configuration.IndexedResource) int64 { return v.Resolution.MilliValue() }, - ), - indexNameByPriority: indexNameByPriority, - keyIndexByPriority: keyIndexByPriority, - indexedTaints: mapFromSlice(indexedTaints), - indexedNodeLabels: mapFromSlice(indexedNodeLabels), - indexedNodeLabelValues: indexedNodeLabelValues, - nodeTypes: make(map[uint64]*schedulerobjects.NodeType), - wellKnownNodeTypes: make(map[string]*configuration.WellKnownNodeType), - numNodesByNodeType: make(map[uint64]int), - totalResources: schedulerobjects.ResourceList{Resources: make(map[string]resource.Quantity)}, - db: db, + priorityClasses: priorityClasses, + nodeDbPriorities: nodeDbPriorities, + indexedResources: indexedResourceNames, + indexedResourcesSet: mapFromSlice(indexedResourceNames), + indexedResourceResolution: indexedResourceResolution, + indexNameByPriority: indexNameByPriority, + keyIndexByPriority: keyIndexByPriority, + indexedTaints: mapFromSlice(indexedTaints), + indexedNodeLabels: mapFromSlice(indexedNodeLabels), + indexedNodeLabelValues: indexedNodeLabelValues, + nodeTypes: make(map[uint64]*schedulerobjects.NodeType), + wellKnownNodeTypes: make(map[string]*configuration.WellKnownNodeType), + numNodesByNodeType: 
make(map[uint64]int), + totalResources: schedulerobjects.ResourceList{Resources: make(map[string]resource.Quantity)}, + db: db, // Set the initial capacity (somewhat arbitrarily) to 128 reasons. podRequirementsNotMetReasonStringCache: make(map[uint64]string, 128), scheduledAtPriorityByJobId: make(map[string]int32), stringInterner: stringInterner, + resourceListFactory: resourceListFactory, } for _, wellKnownNodeType := range wellKnownNodeTypes { @@ -342,6 +317,29 @@ func NewNodeDb( return &nodeDb, nil } +func makeIndexedResourceResolution(indexedResourceTypes []configuration.ResourceType, resourceListFactory *internaltypes.ResourceListFactory) ([]int64, error) { + if len(indexedResourceTypes) < 1 { + return nil, errors.New("must specify at least one entry in indexedResources in config") + } + + result := make([]int64, len(indexedResourceTypes)) + for i, indexedResourceType := range indexedResourceTypes { + nativeScale, err := resourceListFactory.GetScale(indexedResourceType.Name) + if err != nil { + return nil, fmt.Errorf("config error: resource %q specified in indexedResources but not in supportedResourceTypes", indexedResourceType.Name) + } + result[i] = indexedResourceType.Resolution.ScaledValue(nativeScale) + if result[i] <= 0 { + return nil, fmt.Errorf("config error: invalid resolution specified in indexedResources for resource %q (possibly missing, zero, or negative)", indexedResourceType.Name) + } + if indexedResourceType.Resolution.Cmp(*resource.NewScaledQuantity(1, nativeScale)) == -1 { + return nil, fmt.Errorf("config error: resolution specified in indexedResources for resource %q is smaller than the resolution specified in supportedResourceTypes", indexedResourceType.Name) + } + } + + return result, nil +} + // Reset clears out data specific to one scheduling round to prepare for a new scheduling round. // Only necessary when nodeDb.enableNewPreemptionStrategy is true. func (nodeDb *NodeDb) Reset() error { @@ -468,43 +466,34 @@ func NodeJobDiff(txnA, txnB *memdb.Txn) (map[string]*internaltypes.Node, map[str func (nodeDb *NodeDb) ScheduleManyWithTxn(txn *memdb.Txn, gctx *schedulercontext.GangSchedulingContext) (bool, error) { // Attempt to schedule pods one by one in a transaction. - numScheduled := 0 for _, jctx := range gctx.JobSchedulingContexts { // In general, we may attempt to schedule a gang multiple times (in // order to find the best fit for this gang); clear out any remnants of // previous attempts. jctx.UnschedulableReason = "" - jctx.ShouldFail = false node, err := nodeDb.SelectNodeForJobWithTxn(txn, jctx) if err != nil { return false, err } - if node == nil { - // Indicates that when the min cardinality is met, we should fail this job back to the client. - jctx.ShouldFail = true - continue - } - - // If we found a node for this pod, bind it and continue to the next pod. - if node, err := nodeDb.bindJobToNode(node, jctx.Job, jctx.PodSchedulingContext.ScheduledAtPriority); err != nil { - return false, err - } else { - if err := nodeDb.UpsertWithTxn(txn, node); err != nil { + if node != nil { + // If we found a node for this pod, bind it and continue to the next pod. + if node, err := nodeDb.bindJobToNode(node, jctx.Job, jctx.PodSchedulingContext.ScheduledAtPriority); err != nil { return false, err + } else { + if err := nodeDb.UpsertWithTxn(txn, node); err != nil { + return false, err + } } - } - // Once a job is scheduled, it should no longer be considered for preemption. 
- if err := deleteEvictedJobSchedulingContextIfExistsWithTxn(txn, jctx.JobId); err != nil { - return false, err + // Once a job is scheduled, it should no longer be considered for preemption. + if err := deleteEvictedJobSchedulingContextIfExistsWithTxn(txn, jctx.JobId); err != nil { + return false, err + } + } else { + return false, nil } - - numScheduled++ - } - if numScheduled < gctx.GangInfo.MinimumCardinality { - return false, nil } return true, nil } @@ -522,7 +511,7 @@ func deleteEvictedJobSchedulingContextIfExistsWithTxn(txn *memdb.Txn, jobId stri // SelectNodeForJobWithTxn selects a node on which the job can be scheduled. func (nodeDb *NodeDb) SelectNodeForJobWithTxn(txn *memdb.Txn, jctx *schedulercontext.JobSchedulingContext) (*internaltypes.Node, error) { req := jctx.PodRequirements - priorityClass := interfaces.PriorityClassFromLegacySchedulerJob(nodeDb.priorityClasses, nodeDb.defaultPriorityClass, jctx.Job) + priorityClass := jctx.Job.PriorityClass() // If the job has already been scheduled, get the priority at which it was scheduled. // Otherwise, get the original priority the job was submitted with. @@ -555,7 +544,7 @@ func (nodeDb *NodeDb) SelectNodeForJobWithTxn(txn *memdb.Txn, jctx *schedulercon }() // If the nodeIdLabel selector is set, consider only that node. - if nodeId, ok := jctx.GetNodeSelector(schedulerconfig.NodeIdLabel); ok { + if nodeId, ok := jctx.GetNodeSelector(configuration.NodeIdLabel); ok { if it, err := txn.Get("nodes", "id", nodeId); err != nil { return nil, errors.WithStack(err) } else { @@ -737,11 +726,9 @@ func (nodeDb *NodeDb) selectNodeForPodAtPriority( matchingNodeTypeIds []uint64, priority int32, ) (*internaltypes.Node, error) { - req := jctx.PodRequirements - - indexResourceRequests := make([]resource.Quantity, len(nodeDb.indexedResources)) + indexResourceRequests := make([]int64, len(nodeDb.indexedResources)) for i, t := range nodeDb.indexedResources { - indexResourceRequests[i] = req.ResourceRequirements.Requests[v1.ResourceName(t)] + indexResourceRequests[i] = jctx.ResourceRequirements.GetByNameZeroIfMissing(t) } indexName, ok := nodeDb.indexNameByPriority[priority] if !ok { @@ -759,7 +746,7 @@ func (nodeDb *NodeDb) selectNodeForPodAtPriority( keyIndex, nodeDb.indexedResources, indexResourceRequests, - nodeDb.indexedResourceResolutionMillis, + nodeDb.indexedResourceResolution, ) if err != nil { return nil, err @@ -781,42 +768,27 @@ func (nodeDb *NodeDb) selectNodeForPodWithItAtPriority( onlyCheckDynamicRequirements bool, ) (*internaltypes.Node, error) { var selectedNode *internaltypes.Node - var selectedNodeScore int - var numExtraNodes uint for obj := it.Next(); obj != nil; obj = it.Next() { - if selectedNode != nil { - numExtraNodes++ - if numExtraNodes > nodeDb.maxExtraNodesToConsider { - break - } - } - node := obj.(*internaltypes.Node) if node == nil { return nil, nil } var matches bool - var score int var reason PodRequirementsNotMetReason var err error if onlyCheckDynamicRequirements { - matches, score, reason = DynamicJobRequirementsMet(node.AllocatableByPriority[priority], jctx) + matches, reason = DynamicJobRequirementsMet(node.AllocatableByPriority[priority], jctx) } else { - matches, score, reason, err = jobRequirementsMet(node, priority, jctx) + matches, reason, err = JobRequirementsMet(node, priority, jctx) } if err != nil { return nil, err } if matches { - if selectedNode == nil || score > selectedNodeScore { - selectedNode = node - selectedNodeScore = score - if selectedNodeScore == SchedulableBestScore { - break - } - } + 
selectedNode = node + break } else { s := nodeDb.stringFromPodRequirementsNotMetReason(reason) jctx.PodSchedulingContext.NumExcludedNodesByReason[s] += 1 @@ -849,7 +821,7 @@ func (nodeDb *NodeDb) selectNodeForJobWithFairPreemption(txn *memdb.Txn, jctx *s for obj := it.Next(); obj != nil && selectedNode == nil; obj = it.Next() { evictedJobSchedulingContext := obj.(*EvictedJobSchedulingContext) evictedJctx := evictedJobSchedulingContext.JobSchedulingContext - nodeId, ok := evictedJctx.GetNodeSelector(schedulerconfig.NodeIdLabel) + nodeId, ok := evictedJctx.GetNodeSelector(configuration.NodeIdLabel) if !ok { return nil, errors.Errorf("evicted job %s does not have a nodeIdLabel", evictedJctx.JobId) } @@ -876,7 +848,7 @@ func (nodeDb *NodeDb) selectNodeForJobWithFairPreemption(txn *memdb.Txn, jctx *s if priority > maxPriority { maxPriority = priority } - matches, _, reason, err := jobRequirementsMet( + matches, reason, err := JobRequirementsMet( node, // At this point, we've unbound the jobs running on the node. // Hence, we should check if the job is schedulable at evictedPriority, @@ -907,7 +879,7 @@ func (nodeDb *NodeDb) selectNodeForJobWithFairPreemption(txn *memdb.Txn, jctx *s } // bindJobToNode returns a copy of node with job bound to it. -func (nodeDb *NodeDb) bindJobToNode(node *internaltypes.Node, job interfaces.LegacySchedulerJob, priority int32) (*internaltypes.Node, error) { +func (nodeDb *NodeDb) bindJobToNode(node *internaltypes.Node, job *jobdb.Job, priority int32) (*internaltypes.Node, error) { node = node.UnsafeCopy() if err := nodeDb.bindJobToNodeInPlace(node, job, priority); err != nil { return nil, err @@ -916,37 +888,35 @@ func (nodeDb *NodeDb) bindJobToNode(node *internaltypes.Node, job interfaces.Leg } // bindJobToNodeInPlace is like bindJobToNode, but doesn't make a copy of node. 
-func (nodeDb *NodeDb) bindJobToNodeInPlace(node *internaltypes.Node, job interfaces.LegacySchedulerJob, priority int32) error { - jobId := job.GetId() - requests := job.GetResourceRequirements().Requests +func (nodeDb *NodeDb) bindJobToNodeInPlace(node *internaltypes.Node, job *jobdb.Job, priority int32) error { + jobId := job.Id() + requests := job.EfficientResourceRequirements() _, isEvicted := node.EvictedJobRunIds[jobId] delete(node.EvictedJobRunIds, jobId) if !isEvicted { if node.AllocatedByJobId == nil { - node.AllocatedByJobId = make(map[string]schedulerobjects.ResourceList) + node.AllocatedByJobId = make(map[string]internaltypes.ResourceList) } if allocatedToJob, ok := node.AllocatedByJobId[jobId]; ok { return errors.Errorf("job %s already has resources allocated on node %s", jobId, node.GetId()) } else { - allocatedToJob.AddV1ResourceList(requests) - node.AllocatedByJobId[jobId] = allocatedToJob + node.AllocatedByJobId[jobId] = allocatedToJob.Add(requests) } if node.AllocatedByQueue == nil { - node.AllocatedByQueue = make(map[string]schedulerobjects.ResourceList) + node.AllocatedByQueue = make(map[string]internaltypes.ResourceList) } - queue := job.GetQueue() + queue := job.Queue() allocatedToQueue := node.AllocatedByQueue[queue] - allocatedToQueue.AddV1ResourceList(requests) - node.AllocatedByQueue[queue] = allocatedToQueue + node.AllocatedByQueue[queue] = allocatedToQueue.Add(requests) } allocatable := node.AllocatableByPriority - allocatable.MarkAllocatedV1ResourceList(priority, requests) + markAllocated(allocatable, priority, requests) if isEvicted { - allocatable.MarkAllocatableV1ResourceList(evictedPriority, requests) + markAllocatable(allocatable, evictedPriority, requests) } nodeDb.scheduledAtPriorityByJobId[jobId] = priority @@ -965,11 +935,11 @@ func (nodeDb *NodeDb) bindJobToNodeInPlace(node *internaltypes.Node, job interfa // AllocatedByQueue. func (nodeDb *NodeDb) EvictJobsFromNode( priorityClasses map[string]types.PriorityClass, - jobFilter func(interfaces.LegacySchedulerJob) bool, - jobs []interfaces.LegacySchedulerJob, + jobFilter func(*jobdb.Job) bool, + jobs []*jobdb.Job, node *internaltypes.Node, -) ([]interfaces.LegacySchedulerJob, *internaltypes.Node, error) { - evicted := make([]interfaces.LegacySchedulerJob, 0) +) ([]*jobdb.Job, *internaltypes.Node, error) { + evicted := make([]*jobdb.Job, 0) node = node.UnsafeCopy() for _, job := range jobs { if jobFilter != nil && !jobFilter(job) { @@ -984,13 +954,13 @@ func (nodeDb *NodeDb) EvictJobsFromNode( } // evictJobFromNodeInPlace is the in-place operation backing EvictJobsFromNode. 
-func (nodeDb *NodeDb) evictJobFromNodeInPlace(priorityClasses map[string]types.PriorityClass, job interfaces.LegacySchedulerJob, node *internaltypes.Node) error { - jobId := job.GetId() +func (nodeDb *NodeDb) evictJobFromNodeInPlace(priorityClasses map[string]types.PriorityClass, job *jobdb.Job, node *internaltypes.Node) error { + jobId := job.Id() if _, ok := node.AllocatedByJobId[jobId]; !ok { return errors.Errorf("job %s has no resources allocated on node %s", jobId, node.GetId()) } - queue := job.GetQueue() + queue := job.Queue() if _, ok := node.AllocatedByQueue[queue]; !ok { return errors.Errorf("queue %s has no resources allocated on node %s", queue, node.GetId()) } @@ -1003,20 +973,36 @@ func (nodeDb *NodeDb) evictJobFromNodeInPlace(priorityClasses map[string]types.P } node.EvictedJobRunIds[jobId] = true - allocatable := node.AllocatableByPriority + allocatableByPriority := node.AllocatableByPriority priority, ok := nodeDb.GetScheduledAtPriority(jobId) if !ok { return errors.Errorf("job %s not mapped to a priority", jobId) } - requests := job.GetResourceRequirements().Requests - allocatable.MarkAllocatableV1ResourceList(priority, requests) - allocatable.MarkAllocatedV1ResourceList(evictedPriority, requests) + jobRequests := job.EfficientResourceRequirements() + markAllocatable(allocatableByPriority, priority, jobRequests) + markAllocated(allocatableByPriority, evictedPriority, jobRequests) return nil } +func markAllocated(allocatableByPriority map[int32]internaltypes.ResourceList, priorityCutoff int32, rs internaltypes.ResourceList) { + markAllocatable(allocatableByPriority, priorityCutoff, rs.Negate()) +} + +func markAllocatable(allocatableByPriority map[int32]internaltypes.ResourceList, priorityCutoff int32, rs internaltypes.ResourceList) { + priorities := make([]int32, 0, len(allocatableByPriority)) + for priority := range allocatableByPriority { + if priority <= priorityCutoff { + priorities = append(priorities, priority) + } + } + for _, priority := range priorities { + allocatableByPriority[priority] = allocatableByPriority[priority].Add(rs) + } +} + // UnbindJobsFromNode returns a node with all elements of jobs unbound from it. -func (nodeDb *NodeDb) UnbindJobsFromNode(priorityClasses map[string]types.PriorityClass, jobs []interfaces.LegacySchedulerJob, node *internaltypes.Node) (*internaltypes.Node, error) { +func (nodeDb *NodeDb) UnbindJobsFromNode(priorityClasses map[string]types.PriorityClass, jobs []*jobdb.Job, node *internaltypes.Node) (*internaltypes.Node, error) { node = node.UnsafeCopy() for _, job := range jobs { if err := nodeDb.unbindJobFromNodeInPlace(priorityClasses, job, node); err != nil { @@ -1027,7 +1013,7 @@ func (nodeDb *NodeDb) UnbindJobsFromNode(priorityClasses map[string]types.Priori } // UnbindJobFromNode returns a copy of node with job unbound from it. -func (nodeDb *NodeDb) UnbindJobFromNode(priorityClasses map[string]types.PriorityClass, job interfaces.LegacySchedulerJob, node *internaltypes.Node) (*internaltypes.Node, error) { +func (nodeDb *NodeDb) UnbindJobFromNode(priorityClasses map[string]types.PriorityClass, job *jobdb.Job, node *internaltypes.Node) (*internaltypes.Node, error) { node = node.UnsafeCopy() if err := nodeDb.unbindJobFromNodeInPlace(priorityClasses, job, node); err != nil { return nil, err @@ -1036,9 +1022,9 @@ func (nodeDb *NodeDb) UnbindJobFromNode(priorityClasses map[string]types.Priorit } // unbindPodFromNodeInPlace is like UnbindJobFromNode, but doesn't make a copy of node. 
-func (nodeDb *NodeDb) unbindJobFromNodeInPlace(priorityClasses map[string]types.PriorityClass, job interfaces.LegacySchedulerJob, node *internaltypes.Node) error { - jobId := job.GetId() - requests := job.GetResourceRequirements().Requests +func (nodeDb *NodeDb) unbindJobFromNodeInPlace(priorityClasses map[string]types.PriorityClass, job *jobdb.Job, node *internaltypes.Node) error { + jobId := job.Id() + requests := job.EfficientResourceRequirements() _, isEvicted := node.EvictedJobRunIds[jobId] delete(node.EvictedJobRunIds, jobId) @@ -1050,13 +1036,15 @@ func (nodeDb *NodeDb) unbindJobFromNodeInPlace(priorityClasses map[string]types. delete(node.AllocatedByJobId, jobId) } - queue := job.GetQueue() + queue := job.Queue() if allocatedToQueue, ok := node.AllocatedByQueue[queue]; !ok { return errors.Errorf("queue %s has no resources allocated on node %s", queue, node.GetId()) } else { - allocatedToQueue.SubV1ResourceList(requests) - if allocatedToQueue.IsZero() { + allocatedToQueue = allocatedToQueue.Subtract(requests) + if allocatedToQueue.AllZero() { delete(node.AllocatedByQueue, queue) + } else { + node.AllocatedByQueue[queue] = allocatedToQueue } } @@ -1071,7 +1059,7 @@ func (nodeDb *NodeDb) unbindJobFromNodeInPlace(priorityClasses map[string]types. return errors.Errorf("job %s not mapped to a priority", jobId) } } - allocatable.MarkAllocatableV1ResourceList(priority, requests) + markAllocatable(allocatable, priority, requests) return nil } @@ -1148,7 +1136,7 @@ func (nodeDb *NodeDb) ClearAllocated() error { newNodes := make([]*internaltypes.Node, 0) for node := it.NextNode(); node != nil; node = it.NextNode() { node = node.UnsafeCopy() - node.AllocatableByPriority = schedulerobjects.NewAllocatableByPriorityAndResourceType( + node.AllocatableByPriority = newAllocatableByPriorityAndResourceType( nodeDb.nodeDbPriorities, node.TotalResources, ) @@ -1161,6 +1149,14 @@ func (nodeDb *NodeDb) ClearAllocated() error { return nil } +func newAllocatableByPriorityAndResourceType(priorities []int32, rl internaltypes.ResourceList) map[int32]internaltypes.ResourceList { + rv := make(map[int32]internaltypes.ResourceList, len(priorities)) + for _, priority := range priorities { + rv[priority] = rl + } + return rv +} + func (nodeDb *NodeDb) AddEvictedJobSchedulingContextWithTxn(txn *memdb.Txn, index int, jctx *schedulercontext.JobSchedulingContext) error { if it, err := txn.Get("evictedJobs", "id", jctx.JobId); err != nil { return errors.WithStack(err) @@ -1247,14 +1243,14 @@ func (nodeDb *NodeDb) stringFromPodRequirementsNotMetReason(reason PodRequiremen } // nodeDbKey returns the index key for a particular node. -// Allocatable resources are rounded down to the closest multiple of nodeDb.indexedResourceResolutionMillis. +// Allocatable resources are rounded down to the closest multiple of nodeDb.indexedResourceResolution. // This improves efficiency by reducing the number of distinct values in the index. 
-func (nodeDb *NodeDb) nodeDbKey(out []byte, nodeTypeId uint64, allocatable schedulerobjects.ResourceList, nodeIndex uint64) []byte { +func (nodeDb *NodeDb) nodeDbKey(out []byte, nodeTypeId uint64, allocatable internaltypes.ResourceList, nodeIndex uint64) []byte { return RoundedNodeIndexKeyFromResourceList( out, nodeTypeId, nodeDb.indexedResources, - nodeDb.indexedResourceResolutionMillis, + nodeDb.indexedResourceResolution, allocatable, nodeIndex, ) diff --git a/internal/scheduler/nodedb/nodedb_test.go b/internal/scheduler/nodedb/nodedb_test.go index a66eda3bbe9..b996cde278e 100644 --- a/internal/scheduler/nodedb/nodedb_test.go +++ b/internal/scheduler/nodedb/nodedb_test.go @@ -10,14 +10,11 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" armadamaps "github.com/armadaproject/armada/internal/common/maps" "github.com/armadaproject/armada/internal/common/stringinterner" - "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" @@ -127,10 +124,13 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { entry, err := nodeDb.GetNode(node.Id) require.NoError(t, err) - jobFilter := func(job interfaces.LegacySchedulerJob) bool { return true } + jobFilter := func(job *jobdb.Job) bool { return true } job := testfixtures.Test1GpuJob("A", testfixtures.PriorityClass0) - request := schedulerobjects.ResourceListFromV1ResourceList(job.GetResourceRequirements().Requests) - jobId := job.GetId() + request := job.EfficientResourceRequirements() + requestInternalRl, err := nodeDb.resourceListFactory.FromJobResourceListFailOnUnknown(job.ResourceRequirements().Requests) + assert.Nil(t, err) + + jobId := job.Id() boundNode, err := nodeDb.bindJobToNode(entry, job, job.PodRequirements().Priority) require.NoError(t, err) @@ -138,12 +138,12 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { unboundNode, err := nodeDb.UnbindJobFromNode(testfixtures.TestPriorityClasses, job, boundNode) require.NoError(t, err) - unboundMultipleNode, err := nodeDb.UnbindJobsFromNode(testfixtures.TestPriorityClasses, []interfaces.LegacySchedulerJob{job}, boundNode) + unboundMultipleNode, err := nodeDb.UnbindJobsFromNode(testfixtures.TestPriorityClasses, []*jobdb.Job{job}, boundNode) require.NoError(t, err) - evictedJobs, evictedNode, err := nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, jobFilter, []interfaces.LegacySchedulerJob{job}, boundNode) + evictedJobs, evictedNode, err := nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, jobFilter, []*jobdb.Job{job}, boundNode) require.NoError(t, err) - assert.Equal(t, []interfaces.LegacySchedulerJob{job}, evictedJobs) + assert.Equal(t, []*jobdb.Job{job}, evictedJobs) evictedUnboundNode, err := nodeDb.UnbindJobFromNode(testfixtures.TestPriorityClasses, job, evictedNode) require.NoError(t, err) @@ -151,7 +151,7 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { evictedBoundNode, err := nodeDb.bindJobToNode(evictedNode, job, job.PodRequirements().Priority) require.NoError(t, err) - _, _, err = 
nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, jobFilter, []interfaces.LegacySchedulerJob{job}, entry) + _, _, err = nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, jobFilter, []*jobdb.Job{job}, entry) require.Error(t, err) _, err = nodeDb.UnbindJobFromNode(testfixtures.TestPriorityClasses, job, entry) @@ -160,7 +160,7 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { _, err = nodeDb.bindJobToNode(boundNode, job, job.PodRequirements().Priority) require.Error(t, err) - _, _, err = nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, jobFilter, []interfaces.LegacySchedulerJob{job}, evictedNode) + _, _, err = nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, jobFilter, []*jobdb.Job{job}, evictedNode) require.Error(t, err) assertNodeAccountingEqual(t, entry, unboundNode) @@ -172,14 +172,14 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { assert.True( t, armadamaps.DeepEqual( - map[string]schedulerobjects.ResourceList{jobId: request}, + map[string]internaltypes.ResourceList{jobId: requestInternalRl}, boundNode.AllocatedByJobId, ), ) assert.True( t, armadamaps.DeepEqual( - map[string]schedulerobjects.ResourceList{jobId: request}, + map[string]internaltypes.ResourceList{jobId: requestInternalRl}, evictedNode.AllocatedByJobId, ), ) @@ -187,21 +187,21 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { assert.True( t, armadamaps.DeepEqual( - map[string]schedulerobjects.ResourceList{"A": request}, + map[string]internaltypes.ResourceList{"A": request}, boundNode.AllocatedByQueue, ), ) assert.True( t, armadamaps.DeepEqual( - map[string]schedulerobjects.ResourceList{"A": request}, + map[string]internaltypes.ResourceList{"A": request}, evictedNode.AllocatedByQueue, ), ) - expectedAllocatable := boundNode.TotalResources.DeepCopy() - expectedAllocatable.Sub(request) - priority := testfixtures.TestPriorityClasses[job.GetPriorityClassName()].Priority + expectedAllocatable := boundNode.TotalResources + expectedAllocatable = expectedAllocatable.Subtract(request) + priority := testfixtures.TestPriorityClasses[job.PriorityClassName()].Priority assert.True(t, expectedAllocatable.Equal(boundNode.AllocatableByPriority[priority])) assert.Empty(t, unboundNode.AllocatedByJobId) @@ -210,11 +210,9 @@ func TestNodeBindingEvictionUnbinding(t *testing.T) { } func assertNodeAccountingEqual(t *testing.T, node1, node2 *internaltypes.Node) { - allocatable1 := schedulerobjects.QuantityByTAndResourceType[int32](node1.AllocatableByPriority) - allocatable2 := schedulerobjects.QuantityByTAndResourceType[int32](node2.AllocatableByPriority) assert.True( t, - allocatable1.Equal(allocatable2), + armadamaps.DeepEqual(node1.AllocatableByPriority, node2.AllocatableByPriority), "expected %v, but got %v", node1.AllocatableByPriority, node2.AllocatableByPriority, @@ -253,20 +251,20 @@ func assertNodeAccountingEqual(t *testing.T, node1, node2 *internaltypes.Node) { func TestEviction(t *testing.T) { tests := map[string]struct { - jobFilter func(interfaces.LegacySchedulerJob) bool + jobFilter func(*jobdb.Job) bool expectedEvictions []int32 }{ "jobFilter always returns false": { - jobFilter: func(_ interfaces.LegacySchedulerJob) bool { return false }, + jobFilter: func(_ *jobdb.Job) bool { return false }, expectedEvictions: []int32{}, }, "jobFilter always returns true": { - jobFilter: func(_ interfaces.LegacySchedulerJob) bool { return true }, + jobFilter: func(_ *jobdb.Job) bool { return true }, expectedEvictions: []int32{0, 1}, }, "jobFilter returns true for preemptible jobs": { - 
jobFilter: func(job interfaces.LegacySchedulerJob) bool { - priorityClassName := job.GetPriorityClassName() + jobFilter: func(job *jobdb.Job) bool { + priorityClassName := job.PriorityClassName() priorityClass := testfixtures.TestPriorityClasses[priorityClassName] return priorityClass.Preemptible }, @@ -293,13 +291,13 @@ func TestEviction(t *testing.T) { entry, err := nodeDb.GetNode(node.Id) require.NoError(t, err) - existingJobs := make([]interfaces.LegacySchedulerJob, len(jobs)) + existingJobs := make([]*jobdb.Job, len(jobs)) for i, job := range jobs { existingJobs[i] = job } actualEvictions, _, err := nodeDb.EvictJobsFromNode(testfixtures.TestPriorityClasses, tc.jobFilter, existingJobs, entry) require.NoError(t, err) - expectedEvictions := make([]interfaces.LegacySchedulerJob, 0, len(tc.expectedEvictions)) + expectedEvictions := make([]*jobdb.Job, 0, len(tc.expectedEvictions)) for _, i := range tc.expectedEvictions { expectedEvictions = append(expectedEvictions, jobs[i]) } @@ -339,7 +337,7 @@ func TestScheduleIndividually(t *testing.T) { }, testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), ), - ExpectSuccess: testfixtures.Repeat(false, 1), + ExpectSuccess: testfixtures.Repeat(true, 1), // we ignore unknown resource types on jobs, should never happen in practice anyway as these should fail earlier. }, "preemption": { Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), @@ -467,8 +465,8 @@ func TestScheduleIndividually(t *testing.T) { node, err := nodeDb.GetNode(nodeId) require.NoError(t, err) require.NotNil(t, node) - expected := schedulerobjects.ResourceListFromV1ResourceList(job.GetResourceRequirements().Requests) - actual, ok := node.AllocatedByJobId[job.GetId()] + expected := job.EfficientResourceRequirements() + actual, ok := node.AllocatedByJobId[job.Id()] require.True(t, ok) assert.True(t, actual.Equal(expected)) } @@ -489,19 +487,13 @@ func TestScheduleMany(t *testing.T) { // For each group, whether we expect scheduling to succeed. ExpectSuccess []bool }{ - // Attempts to schedule 32 jobs with a minimum gang cardinality of 1 job. All jobs get scheduled. + // Attempts to schedule 32. All jobs get scheduled. "simple success": { Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: [][]*jobdb.Job{gangSuccess}, ExpectSuccess: []bool{true}, }, - // Attempts to schedule 33 jobs with a minimum gang cardinality of 32 jobs. One fails, but the overall result is a success. - "simple success with min cardinality": { - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Jobs: [][]*jobdb.Job{testfixtures.WithGangAnnotationsAndMinCardinalityJobs(32, testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33))}, - ExpectSuccess: []bool{true}, - }, - // Attempts to schedule 33 jobs with a minimum gang cardinality of 33. The overall result fails. + // Attempts to schedule 33 jobs. The overall result fails. 
"simple failure with min cardinality": { Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: [][]*jobdb.Job{gangFailure}, @@ -552,9 +544,7 @@ func TestScheduleMany(t *testing.T) { for _, jctx := range jctxs { pctx := jctx.PodSchedulingContext require.NotNil(t, pctx) - if !jctx.ShouldFail { - assert.NotEqual(t, "", pctx.NodeId) - } + assert.NotEqual(t, "", pctx.NodeId) } } }) @@ -562,36 +552,14 @@ func TestScheduleMany(t *testing.T) { } func TestAwayNodeTypes(t *testing.T) { - priorityClasses := map[string]types.PriorityClass{ - "armada-preemptible-away": { - Priority: 30000, - Preemptible: true, - - AwayNodeTypes: []types.AwayNodeType{ - {Priority: 29000, WellKnownNodeTypeName: "whale"}, - }, - }, - } - nodeDb, err := NewNodeDb( - priorityClasses, - testfixtures.TestMaxExtraNodesToConsider, + testfixtures.TestPriorityClasses, testfixtures.TestResources, testfixtures.TestIndexedTaints, testfixtures.TestIndexedNodeLabels, - []configuration.WellKnownNodeType{ - { - Name: "whale", - Taints: []v1.Taint{ - { - Key: "whale", - Value: "true", - Effect: v1.TaintEffectNoSchedule, - }, - }, - }, - }, + testfixtures.TestWellKnownNodeTypes, stringinterner.New(1024), + testfixtures.TestResourceListFactory, ) require.NoError(t, err) @@ -600,7 +568,7 @@ func TestAwayNodeTypes(t *testing.T) { node.Taints = append( node.Taints, v1.Taint{ - Key: "whale", + Key: "gpu", Value: "true", Effect: v1.TaintEffectNoSchedule, }, @@ -614,7 +582,7 @@ func TestAwayNodeTypes(t *testing.T) { "armada-preemptible-away", testfixtures.Test1Cpu4GiPodReqs(testfixtures.TestQueue, jobId, 30000), ) - jctx := schedulercontext.JobSchedulingContextFromJob(priorityClasses, job) + jctx := schedulercontext.JobSchedulingContextFromJob(job) require.Empty(t, jctx.AdditionalTolerations) gctx := schedulercontext.NewGangSchedulingContext([]*schedulercontext.JobSchedulingContext{jctx}) @@ -625,7 +593,7 @@ func TestAwayNodeTypes(t *testing.T) { t, []v1.Toleration{ { - Key: "whale", + Key: "gpu", Value: "true", Effect: v1.TaintEffectNoSchedule, }, @@ -634,15 +602,131 @@ func TestAwayNodeTypes(t *testing.T) { ) } +func TestMakeIndexedResourceResolution(t *testing.T) { + supportedResources := []schedulerconfig.ResourceType{ + { + Name: "unit-resource-1", + Resolution: resource.MustParse("1"), + }, + { + Name: "unit-resource-2", + Resolution: resource.MustParse("1"), + }, + { + Name: "un-indexed-resource", + Resolution: resource.MustParse("1"), + }, + { + Name: "milli-resource-1", + Resolution: resource.MustParse("1m"), + }, + { + Name: "milli-resource-2", + Resolution: resource.MustParse("1m"), + }, + } + + indexedResources := []schedulerconfig.ResourceType{ + { + Name: "unit-resource-1", + Resolution: resource.MustParse("1"), + }, + { + Name: "unit-resource-2", + Resolution: resource.MustParse("100"), + }, + { + Name: "milli-resource-1", + Resolution: resource.MustParse("1m"), + }, + { + Name: "milli-resource-2", + Resolution: resource.MustParse("1"), + }, + } + + resourceListFactory, err := internaltypes.MakeResourceListFactory(supportedResources) + assert.Nil(t, err) + assert.NotNil(t, resourceListFactory) + + result, err := makeIndexedResourceResolution(indexedResources, resourceListFactory) + assert.Nil(t, err) + assert.Equal(t, []int64{1, 100, 1, 1000}, result) +} + +func TestMakeIndexedResourceResolution_ErrorsOnUnsupportedResource(t *testing.T) { + supportedResources := []schedulerconfig.ResourceType{ + { + Name: "a-resource", + Resolution: resource.MustParse("1"), + }, + } + + indexedResources := 
[]schedulerconfig.ResourceType{ + { + Name: "non-supported-resource", + Resolution: resource.MustParse("1"), + }, + } + + resourceListFactory, err := internaltypes.MakeResourceListFactory(supportedResources) + assert.Nil(t, err) + assert.NotNil(t, resourceListFactory) + + result, err := makeIndexedResourceResolution(indexedResources, resourceListFactory) + assert.NotNil(t, err) + assert.Nil(t, result) +} + +func TestMakeIndexedResourceResolution_ErrorsOnInvalidResolution(t *testing.T) { + supportedResources := []schedulerconfig.ResourceType{ + { + Name: "a-resource", + Resolution: resource.MustParse("1"), + }, + } + + resourceListFactory, err := internaltypes.MakeResourceListFactory(supportedResources) + assert.Nil(t, err) + assert.NotNil(t, resourceListFactory) + + result, err := makeIndexedResourceResolution([]schedulerconfig.ResourceType{ + { + Name: "a-resource", + Resolution: resource.MustParse("0"), + }, + }, resourceListFactory) + assert.NotNil(t, err) + assert.Nil(t, result) + + result, err = makeIndexedResourceResolution([]schedulerconfig.ResourceType{ + { + Name: "a-resource", + Resolution: resource.MustParse("-1"), + }, + }, resourceListFactory) + assert.NotNil(t, err) + assert.Nil(t, result) + + result, err = makeIndexedResourceResolution([]schedulerconfig.ResourceType{ + { + Name: "a-resource", + Resolution: resource.MustParse("0.1"), // this cannot be less than the supported resource type resolution, should error + }, + }, resourceListFactory) + assert.NotNil(t, err) + assert.Nil(t, result) +} + func benchmarkUpsert(nodes []*schedulerobjects.Node, b *testing.B) { nodeDb, err := NewNodeDb( testfixtures.TestPriorityClasses, - testfixtures.TestMaxExtraNodesToConsider, testfixtures.TestResources, testfixtures.TestIndexedTaints, testfixtures.TestIndexedNodeLabels, testfixtures.TestWellKnownNodeTypes, stringinterner.New(1024), + testfixtures.TestResourceListFactory, ) require.NoError(b, err) txn := nodeDb.Txn(true) @@ -677,12 +761,12 @@ func BenchmarkUpsert100000(b *testing.B) { func benchmarkScheduleMany(b *testing.B, nodes []*schedulerobjects.Node, jobs []*jobdb.Job) { nodeDb, err := NewNodeDb( testfixtures.TestPriorityClasses, - testfixtures.TestMaxExtraNodesToConsider, testfixtures.TestResources, testfixtures.TestIndexedTaints, testfixtures.TestIndexedNodeLabels, testfixtures.TestWellKnownNodeTypes, stringinterner.New(1024), + testfixtures.TestResourceListFactory, ) require.NoError(b, err) txn := nodeDb.Txn(true) @@ -803,12 +887,12 @@ func BenchmarkScheduleManyResourceConstrained(b *testing.B) { func newNodeDbWithNodes(nodes []*schedulerobjects.Node) (*NodeDb, error) { nodeDb, err := NewNodeDb( testfixtures.TestPriorityClasses, - testfixtures.TestMaxExtraNodesToConsider, testfixtures.TestResources, testfixtures.TestIndexedTaints, testfixtures.TestIndexedNodeLabels, testfixtures.TestWellKnownNodeTypes, stringinterner.New(1024), + testfixtures.TestResourceListFactory, ) if err != nil { return nil, err diff --git a/internal/scheduler/nodedb/nodeidindex_test.go b/internal/scheduler/nodedb/nodeidindex_test.go index 1a58c62d39b..84afe2f99d2 100644 --- a/internal/scheduler/nodedb/nodeidindex_test.go +++ b/internal/scheduler/nodedb/nodeidindex_test.go @@ -7,7 +7,6 @@ import ( v1 "k8s.io/api/core/v1" "github.com/armadaproject/armada/internal/scheduler/internaltypes" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) func TestFromObjectValid(t *testing.T) { @@ -51,10 +50,10 @@ func makeTestNode(id string) *internaltypes.Node { "node_name", []v1.Taint{}, 
map[string]string{}, - schedulerobjects.ResourceList{}, - schedulerobjects.AllocatableByPriorityAndResourceType{}, - map[string]schedulerobjects.ResourceList{}, - map[string]schedulerobjects.ResourceList{}, + internaltypes.ResourceList{}, + map[int32]internaltypes.ResourceList{}, + map[string]internaltypes.ResourceList{}, + map[string]internaltypes.ResourceList{}, map[string]bool{}, [][]byte{}, ) diff --git a/internal/scheduler/nodedb/nodeiteration.go b/internal/scheduler/nodedb/nodeiteration.go index 8a40c4c34a9..77a0cae80dc 100644 --- a/internal/scheduler/nodedb/nodeiteration.go +++ b/internal/scheduler/nodedb/nodeiteration.go @@ -8,7 +8,6 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" "golang.org/x/exp/slices" - "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/scheduler/internaltypes" ) @@ -161,8 +160,8 @@ func NewNodeTypesIterator( priority int32, keyIndex int, indexedResources []string, - indexedResourceRequests []resource.Quantity, - indexedResourceResolutionMillis []int64, + indexedResourceRequests []int64, + indexedResourceResolution []int64, ) (*NodeTypesIterator, error) { pq := &nodeTypesIteratorPQ{ priority: priority, @@ -178,7 +177,7 @@ func NewNodeTypesIterator( keyIndex, indexedResources, indexedResourceRequests, - indexedResourceResolutionMillis, + indexedResourceResolution, ) if err != nil { return nil, err @@ -250,11 +249,12 @@ func (it *nodeTypesIteratorPQ) less(a, b *internaltypes.Node) bool { allocatableByPriorityA := a.AllocatableByPriority[it.priority] allocatableByPriorityB := b.AllocatableByPriority[it.priority] for _, t := range it.indexedResources { - qa := allocatableByPriorityA.Get(t) - qb := allocatableByPriorityB.Get(t) - if cmp := qa.Cmp(qb); cmp == -1 { + qa := allocatableByPriorityA.GetByNameZeroIfMissing(t) + qb := allocatableByPriorityB.GetByNameZeroIfMissing(t) + + if qa < qb { return true - } else if cmp == 1 { + } else if qa > qb { return false } } @@ -305,14 +305,16 @@ type NodeTypeIterator struct { // NodeDb indexed resources. indexedResources []string // Pod requests for indexed resources in the same order as indexedResources. - indexedResourceRequests []resource.Quantity - // The resolution with which indexed resources are tracked. In the same order as indexedResources. - indexedResourceResolutionMillis []int64 + indexedResourceRequests []int64 + // The resolution with which indexed resources are tracked. + // In the same order as indexedResources/indexedResourceRequests above. + // In the same units as indexedResourceRequests above. + indexedResourceResolution []int64 // Current lower bound on node allocatable resources looked for. // Updated in-place as the iterator makes progress. - lowerBound []resource.Quantity + lowerBound []int64 // Tentative lower-bound. - newLowerBound []resource.Quantity + newLowerBound []int64 // memdb key computed from nodeTypeId and lowerBound. // Stored here to avoid dynamic allocs. 
key []byte @@ -333,29 +335,29 @@ func NewNodeTypeIterator( priority int32, keyIndex int, indexedResources []string, - indexedResourceRequests []resource.Quantity, - indexedResourceResolutionMillis []int64, + indexedResourceRequests []int64, + indexedResourceResolution []int64, ) (*NodeTypeIterator, error) { if len(indexedResources) != len(indexedResourceRequests) { return nil, errors.Errorf("indexedResources and resourceRequirements are not of equal length") } - if len(indexedResources) != len(indexedResourceResolutionMillis) { - return nil, errors.Errorf("indexedResources and indexedResourceResolutionMillis are not of equal length") + if len(indexedResources) != len(indexedResourceResolution) { + return nil, errors.Errorf("indexedResources and indexedResourceResolution are not of equal length") } if keyIndex < 0 { return nil, errors.Errorf("keyIndex is negative: %d", keyIndex) } it := &NodeTypeIterator{ - txn: txn, - nodeTypeId: nodeTypeId, - priority: priority, - keyIndex: keyIndex, - indexName: indexName, - indexedResources: indexedResources, - indexedResourceRequests: indexedResourceRequests, - indexedResourceResolutionMillis: indexedResourceResolutionMillis, - lowerBound: slices.Clone(indexedResourceRequests), - newLowerBound: slices.Clone(indexedResourceRequests), + txn: txn, + nodeTypeId: nodeTypeId, + priority: priority, + keyIndex: keyIndex, + indexName: indexName, + indexedResources: indexedResources, + indexedResourceRequests: indexedResourceRequests, + indexedResourceResolution: indexedResourceResolution, + lowerBound: slices.Clone(indexedResourceRequests), + newLowerBound: slices.Clone(indexedResourceRequests), } memdbIt, err := it.newNodeTypeIterator() if err != nil { @@ -415,17 +417,17 @@ func (it *NodeTypeIterator) NextNode() (*internaltypes.Node, error) { return nil, nil } allocatableByPriority := node.AllocatableByPriority[it.priority] - if len(allocatableByPriority.Resources) == 0 { + if allocatableByPriority.IsEmpty() { return nil, errors.Errorf("node %s has no resources registered at priority %d: %v", node.GetId(), it.priority, node.AllocatableByPriority) } for i, t := range it.indexedResources { - nodeQuantity := allocatableByPriority.Get(t).DeepCopy() - requestQuantity := it.indexedResourceRequests[i].DeepCopy() - it.newLowerBound[i] = roundQuantityToResolution(nodeQuantity, it.indexedResourceResolutionMillis[i]) + nodeQuantity := allocatableByPriority.GetByNameZeroIfMissing(t) + requestQuantity := it.indexedResourceRequests[i] + it.newLowerBound[i] = roundQuantityToResolution(nodeQuantity, it.indexedResourceResolution[i]) // If nodeQuantity < requestQuantity, replace the iterator using the lowerBound. // If nodeQuantity >= requestQuantity for all resources, return the node. 
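// Requests, allocatable amounts, and resolutions are now plain int64s in the units produced by the
// resource list factory, so the comparison below is integer arithmetic rather than resource.Quantity.Cmp.
// A minimal sketch of what rounding a quantity down to the tracked resolution could look like
// (an assumption, not necessarily the real roundQuantityToResolution helper):
//
//	func roundQuantityToResolution(q, resolution int64) int64 {
//		if resolution <= 0 {
//			return q
//		}
//		return (q / resolution) * resolution
//	}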
- if nodeQuantity.Cmp(requestQuantity) == -1 { + if nodeQuantity < requestQuantity { for j := i; j < len(it.indexedResources); j++ { it.newLowerBound[j] = it.indexedResourceRequests[j] } diff --git a/internal/scheduler/nodedb/nodeiteration_test.go b/internal/scheduler/nodedb/nodeiteration_test.go index 2700fff0eb8..9a61a6b1e92 100644 --- a/internal/scheduler/nodedb/nodeiteration_test.go +++ b/internal/scheduler/nodedb/nodeiteration_test.go @@ -332,27 +332,27 @@ func TestNodeTypeIterator(t *testing.T) { testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("1Gi"), - "gpu": resource.MustParse("1"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("1Gi"), + "nvidia.com/gpu": resource.MustParse("1"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("1Gi"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("1Gi"), + "nvidia.com/gpu": resource.MustParse("2"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("1Gi"), - "gpu": resource.MustParse("5"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("1Gi"), + "nvidia.com/gpu": resource.MustParse("5"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), @@ -367,9 +367,9 @@ func TestNodeTypeIterator(t *testing.T) { testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("2Gi"), - "gpu": resource.MustParse("1"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("2Gi"), + "nvidia.com/gpu": resource.MustParse("1"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), @@ -409,9 +409,9 @@ func TestNodeTypeIterator(t *testing.T) { nodeTypeId: 1, priority: 0, resourceRequests: schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("32"), - "memory": resource.MustParse("512Gi"), - "gpu": resource.MustParse("4"), + "cpu": resource.MustParse("32"), + "memory": resource.MustParse("512Gi"), + "nvidia.com/gpu": resource.MustParse("4"), }}, expected: []int{7, 5, 4, 2, 1, 0}, }, @@ -436,9 +436,12 @@ func TestNodeTypeIterator(t *testing.T) { } require.NoError(t, nodeDb.UpsertMany(entries)) - indexedResourceRequests := make([]resource.Quantity, len(testfixtures.TestResources)) - for i, t := range nodeDb.indexedResources { - indexedResourceRequests[i] = tc.resourceRequests.Get(t) + indexedResourceRequests := make([]int64, len(testfixtures.TestResources)) + rr, err := testfixtures.TestResourceListFactory.FromJobResourceListFailOnUnknown(schedulerobjects.V1ResourceListFromResourceList(tc.resourceRequests)) + assert.Nil(t, err) + for i, resourceName := range nodeDb.indexedResources { + indexedResourceRequests[i], err = rr.GetByName(resourceName) + assert.Nil(t, err) } keyIndex := -1 for i, p := range nodeDb.nodeDbPriorities { @@ -453,9 +456,9 @@ func TestNodeTypeIterator(t *testing.T) { nodeIndexName(keyIndex), tc.priority, keyIndex, - testfixtures.TestResourceNames, + 
nodeDb.indexedResources, indexedResourceRequests, - testfixtures.TestIndexedResourceResolutionMillis, + nodeDb.indexedResourceResolution, ) require.NoError(t, err) @@ -712,27 +715,27 @@ func TestNodeTypesIterator(t *testing.T) { testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("1Gi"), - "gpu": resource.MustParse("1"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("1Gi"), + "nvidia.com/gpu": resource.MustParse("1"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("1Gi"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("1Gi"), + "nvidia.com/gpu": resource.MustParse("2"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("1Gi"), - "gpu": resource.MustParse("5"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("1Gi"), + "nvidia.com/gpu": resource.MustParse("5"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), @@ -752,9 +755,9 @@ func TestNodeTypesIterator(t *testing.T) { testfixtures.WithUsedResourcesNodes( 0, schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("31"), - "memory": resource.MustParse("2Gi"), - "gpu": resource.MustParse("1"), + "cpu": resource.MustParse("31"), + "memory": resource.MustParse("2Gi"), + "nvidia.com/gpu": resource.MustParse("1"), }}, testfixtures.N8GpuNodes(1, testfixtures.TestPriorities), ), @@ -800,9 +803,9 @@ func TestNodeTypesIterator(t *testing.T) { nodeTypeIds: []uint64{1, 2, 3}, priority: 0, resourceRequests: schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("32"), - "memory": resource.MustParse("512Gi"), - "gpu": resource.MustParse("4"), + "cpu": resource.MustParse("32"), + "memory": resource.MustParse("512Gi"), + "nvidia.com/gpu": resource.MustParse("4"), }}, expected: []int{7, 5, 4, 2, 1, 0}, }, @@ -827,9 +830,13 @@ func TestNodeTypesIterator(t *testing.T) { } require.NoError(t, nodeDb.UpsertMany(entries)) - indexedResourceRequests := make([]resource.Quantity, len(testfixtures.TestResources)) - for i, t := range testfixtures.TestResourceNames { - indexedResourceRequests[i] = tc.resourceRequests.Get(t) + rr, err := testfixtures.TestResourceListFactory.FromJobResourceListFailOnUnknown(schedulerobjects.V1ResourceListFromResourceList(tc.resourceRequests)) + assert.Nil(t, err) + + indexedResourceRequests := make([]int64, len(testfixtures.TestResources)) + for i, resourceName := range testfixtures.TestResourceNames { + indexedResourceRequests[i], err = rr.GetByName(resourceName) + assert.Nil(t, err) } it, err := NewNodeTypesIterator( nodeDb.Txn(false), @@ -837,9 +844,9 @@ func TestNodeTypesIterator(t *testing.T) { nodeDb.indexNameByPriority[tc.priority], tc.priority, nodeDb.keyIndexByPriority[tc.priority], - testfixtures.TestResourceNames, + nodeDb.indexedResources, indexedResourceRequests, - testfixtures.TestIndexedResourceResolutionMillis, + nodeDb.indexedResourceResolution, ) require.NoError(t, err) @@ -889,8 +896,9 @@ func BenchmarkNodeTypeIterator(b 
*testing.B) { // Create iterator for 0 CPU required and an unfeasible memory request, // such that the iterator has to consider all nodes. - indexedResourceRequests := make([]resource.Quantity, len(nodeDb.indexedResources)) - indexedResourceRequests[1] = resource.MustParse("1Ti") + indexedResourceRequests := make([]int64, len(nodeDb.indexedResources)) + oneTiB := resource.MustParse("1Ti") + indexedResourceRequests[1] = oneTiB.ScaledValue(0) nodeTypeId := maps.Keys(nodeDb.nodeTypes)[0] var priority int32 txn := nodeDb.Txn(false) @@ -906,7 +914,7 @@ func BenchmarkNodeTypeIterator(b *testing.B) { nodeDb.keyIndexByPriority[priority], nodeDb.indexedResources, indexedResourceRequests, - testfixtures.TestIndexedResourceResolutionMillis, + nodeDb.indexedResourceResolution, ) require.NoError(b, err) for { diff --git a/internal/scheduler/nodedb/nodematching.go b/internal/scheduler/nodedb/nodematching.go index 963c20ef9bb..a068d9e1fbd 100644 --- a/internal/scheduler/nodedb/nodematching.go +++ b/internal/scheduler/nodedb/nodematching.go @@ -14,10 +14,6 @@ import ( ) const ( - // When checking if a pod fits on a node, this score indicates how well the pods fits. - // However, all nodes are currently given the same score. - SchedulableScore = 0 - SchedulableBestScore = SchedulableScore PodRequirementsNotMetReasonUnknown = "unknown" PodRequirementsNotMetReasonInsufficientResources = "insufficient resources available" ) @@ -150,22 +146,22 @@ func NodeTypeJobRequirementsMet(nodeType *schedulerobjects.NodeType, jctx *sched return NodeSelectorRequirementsMet(nodeTypeLabelGetter, nodeType.GetUnsetIndexedLabels(), jctx.PodRequirements.GetNodeSelector()) } -// jobRequirementsMet determines whether a job can be scheduled onto this node. +// JobRequirementsMet determines whether a job can be scheduled onto this node. // If the pod can be scheduled, the returned score indicates how well the node fits: // - 0: Pod can be scheduled by preempting running pods. // - 1: Pod can be scheduled without preempting any running pods. // If the requirements are not met, it returns the reason why. // If the requirements can't be parsed, an error is returned. 
-func jobRequirementsMet(node *internaltypes.Node, priority int32, jctx *schedulercontext.JobSchedulingContext) (bool, int, PodRequirementsNotMetReason, error) { +func JobRequirementsMet(node *internaltypes.Node, priority int32, jctx *schedulercontext.JobSchedulingContext) (bool, PodRequirementsNotMetReason, error) { matches, reason, err := StaticJobRequirementsMet(node, jctx) if !matches || err != nil { - return matches, 0, reason, err + return matches, reason, err } - matches, score, reason := DynamicJobRequirementsMet(node.AllocatableByPriority[priority], jctx) + matches, reason = DynamicJobRequirementsMet(node.AllocatableByPriority[priority], jctx) if !matches { - return matches, 0, reason, nil + return matches, reason, nil } - return true, score, nil, nil + return true, nil, nil } // StaticJobRequirementsMet checks if a job can be scheduled onto this node, @@ -191,7 +187,7 @@ func StaticJobRequirementsMet(node *internaltypes.Node, jctx *schedulercontext.J return matches, reason, err } - matches, reason = ResourceRequirementsMet(node.TotalResources, jctx.PodRequirements.ResourceRequirements.Requests) + matches, reason = resourceRequirementsMet(node.TotalResources, jctx.ResourceRequirements) if !matches { return matches, reason, nil } @@ -201,9 +197,9 @@ func StaticJobRequirementsMet(node *internaltypes.Node, jctx *schedulercontext.J // DynamicJobRequirementsMet checks if a pod can be scheduled onto this node, // accounting for resources allocated to pods already assigned to this node. -func DynamicJobRequirementsMet(allocatableResources schedulerobjects.ResourceList, jctx *schedulercontext.JobSchedulingContext) (bool, int, PodRequirementsNotMetReason) { - matches, reason := ResourceRequirementsMet(allocatableResources, jctx.PodRequirements.ResourceRequirements.Requests) - return matches, SchedulableScore, reason +func DynamicJobRequirementsMet(allocatableResources internaltypes.ResourceList, jctx *schedulercontext.JobSchedulingContext) (bool, PodRequirementsNotMetReason) { + matches, reason := resourceRequirementsMet(allocatableResources, jctx.ResourceRequirements) + return matches, reason } func TolerationRequirementsMet(taints []v1.Taint, tolerations ...[]v1.Toleration) (bool, PodRequirementsNotMetReason) { @@ -264,8 +260,8 @@ func NodeAffinityRequirementsMet(node *internaltypes.Node, nodeSelector *v1.Node return true, nil, nil } -func ResourceRequirementsMet(available schedulerobjects.ResourceList, required v1.ResourceList) (bool, PodRequirementsNotMetReason) { - resourceName, availableQuantity, requiredQuantity, hasGreaterResource := findGreaterQuantity(available, required) +func resourceRequirementsMet(available internaltypes.ResourceList, required internaltypes.ResourceList) (bool, PodRequirementsNotMetReason) { + resourceName, availableQuantity, requiredQuantity, hasGreaterResource := required.ExceedsAvailable(available) if hasGreaterResource { return false, &InsufficientResources{ ResourceName: resourceName, @@ -275,15 +271,3 @@ func ResourceRequirementsMet(available schedulerobjects.ResourceList, required v } return true, nil } - -// findGreaterQuantity returns the name of a resource in required with non-zero quantity such that -// the corresponding quantity in available is smaller, or returns false if no such resource can be found. 
-func findGreaterQuantity(available schedulerobjects.ResourceList, required v1.ResourceList) (string, resource.Quantity, resource.Quantity, bool) { - for t, requiredQuantity := range required { - availableQuantity := available.Get(string(t)) - if requiredQuantity.Cmp(availableQuantity) == 1 { - return string(t), availableQuantity, requiredQuantity, true - } - } - return "", resource.Quantity{}, resource.Quantity{}, false -} diff --git a/internal/scheduler/nodedb/nodematching_test.go b/internal/scheduler/nodedb/nodematching_test.go index 82569e04ffe..5ae54c65f8a 100644 --- a/internal/scheduler/nodedb/nodematching_test.go +++ b/internal/scheduler/nodedb/nodematching_test.go @@ -10,6 +10,7 @@ import ( schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/internal/scheduler/testfixtures" ) func TestNodeSchedulingRequirementsMet(t *testing.T) { @@ -264,6 +265,7 @@ func TestNodeSchedulingRequirementsMet(t *testing.T) { }, "sufficient cpu": { node: makeTestNodeResources( + t, schedulerobjects.AllocatableByPriorityAndResourceType{ 0: schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{ @@ -289,6 +291,7 @@ func TestNodeSchedulingRequirementsMet(t *testing.T) { }, "insufficient cpu": { node: makeTestNodeResources( + t, schedulerobjects.AllocatableByPriorityAndResourceType{ 0: schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{ @@ -314,6 +317,7 @@ func TestNodeSchedulingRequirementsMet(t *testing.T) { }, "sufficient cpu at priority": { node: makeTestNodeResources( + t, schedulerobjects.AllocatableByPriorityAndResourceType{ 0: schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{ @@ -344,6 +348,7 @@ func TestNodeSchedulingRequirementsMet(t *testing.T) { }, "insufficient cpu at priority": { node: makeTestNodeResources( + t, schedulerobjects.AllocatableByPriorityAndResourceType{ 0: schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{ @@ -375,12 +380,13 @@ func TestNodeSchedulingRequirementsMet(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - matches, _, reason, err := jobRequirementsMet( + matches, reason, err := JobRequirementsMet( tc.node, tc.req.Priority, // TODO(albin): Define a jctx in the test case instead. &schedulercontext.JobSchedulingContext{ - PodRequirements: tc.req, + PodRequirements: tc.req, + ResourceRequirements: testfixtures.TestResourceListFactory.FromJobResourceListIgnoreUnknown(schedulerobjects.ResourceListFromV1ResourceList(tc.req.ResourceRequirements.Requests).Resources), }, ) assert.NoError(t, err) @@ -528,7 +534,12 @@ func TestNodeTypeSchedulingRequirementsMet(t *testing.T) { tc.IndexedLabels, ) // TODO(albin): Define a jctx in the test case instead. 
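// The calls below now also populate ResourceRequirements: job scheduling contexts carry the requests
// twice, once as the original k8s PodRequirements and once pre-converted to an internaltypes.ResourceList,
// presumably so that matching can compare int64 values instead of resource.Quantity. The conversion used
// throughout these tests is:
//
//	ResourceRequirements: testfixtures.TestResourceListFactory.FromJobResourceListIgnoreUnknown(
//		schedulerobjects.ResourceListFromV1ResourceList(tc.Req.ResourceRequirements.Requests).Resources),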
- matches, reason := NodeTypeJobRequirementsMet(nodeType, &schedulercontext.JobSchedulingContext{PodRequirements: tc.Req}) + matches, reason := NodeTypeJobRequirementsMet(nodeType, + &schedulercontext.JobSchedulingContext{ + PodRequirements: tc.Req, + ResourceRequirements: testfixtures.TestResourceListFactory.FromJobResourceListIgnoreUnknown(schedulerobjects.ResourceListFromV1ResourceList(tc.Req.ResourceRequirements.Requests).Resources), + }, + ) if tc.ExpectSuccess { assert.True(t, matches) assert.Nil(t, reason) @@ -644,16 +655,25 @@ func makeTestNodeTaintsLabels(taints []v1.Taint, labels map[string]string) *inte "name", taints, labels, - schedulerobjects.ResourceList{}, - schedulerobjects.AllocatableByPriorityAndResourceType{}, - map[string]schedulerobjects.ResourceList{}, - map[string]schedulerobjects.ResourceList{}, + internaltypes.ResourceList{}, + map[int32]internaltypes.ResourceList{}, + map[string]internaltypes.ResourceList{}, + map[string]internaltypes.ResourceList{}, map[string]bool{}, [][]byte{}, ) } -func makeTestNodeResources(allocatableByPriority schedulerobjects.AllocatableByPriorityAndResourceType, totalResources schedulerobjects.ResourceList) *internaltypes.Node { +func makeTestNodeResources(t *testing.T, allocatableByPriority schedulerobjects.AllocatableByPriorityAndResourceType, totalResources schedulerobjects.ResourceList) *internaltypes.Node { + tr, err := testfixtures.TestResourceListFactory.FromJobResourceListFailOnUnknown(schedulerobjects.V1ResourceListFromResourceList(totalResources)) + assert.Nil(t, err) + + abp := map[int32]internaltypes.ResourceList{} + for pri, rl := range allocatableByPriority { + abp[pri], err = testfixtures.TestResourceListFactory.FromJobResourceListFailOnUnknown(schedulerobjects.V1ResourceListFromResourceList(rl)) + assert.Nil(t, err) + } + return internaltypes.CreateNode( "id", 1, @@ -662,10 +682,10 @@ func makeTestNodeResources(allocatableByPriority schedulerobjects.AllocatableByP "name", []v1.Taint{}, map[string]string{}, - totalResources, - allocatableByPriority, - map[string]schedulerobjects.ResourceList{}, - map[string]schedulerobjects.ResourceList{}, + tr, + abp, + map[string]internaltypes.ResourceList{}, + map[string]internaltypes.ResourceList{}, map[string]bool{}, [][]byte{}, ) diff --git a/internal/scheduler/pool_assigner.go b/internal/scheduler/pool_assigner.go index 556d6eea246..acd82e7606d 100644 --- a/internal/scheduler/pool_assigner.go +++ b/internal/scheduler/pool_assigner.go @@ -1,191 +1,51 @@ package scheduler import ( - "time" - - "github.com/gogo/protobuf/proto" - lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/util/clock" - - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/stringinterner" - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/constraints" - schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/database" - "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/jobdb" - "github.com/armadaproject/armada/internal/scheduler/nodedb" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -// PoolAssigner allows jobs to be assigned to a pool +// PoolAssigner allows jobs to be assigned to one or more pools // Note that this is 
intended only for use with metrics calculation type PoolAssigner interface { + AssignPools(j *jobdb.Job) ([]string, error) Refresh(ctx *armadacontext.Context) error - AssignPool(j *jobdb.Job) (string, error) -} - -type executor struct { - nodeDb *nodedb.NodeDb - minimumJobSize schedulerobjects.ResourceList } type DefaultPoolAssigner struct { - executorTimeout time.Duration - priorityClasses map[string]types.PriorityClass - priorities []int32 - indexedResources []configuration.IndexedResource - indexedTaints []string - indexedNodeLabels []string - wellKnownNodeTypes []configuration.WellKnownNodeType - poolByExecutorId map[string]string - executorsByPool map[string][]*executor - executorRepository database.ExecutorRepository - schedulingKeyGenerator *schedulerobjects.SchedulingKeyGenerator - poolCache *lru.Cache - clock clock.Clock + executorRepository database.ExecutorRepository + poolByExecutorId map[string]string } -func NewPoolAssigner(executorTimeout time.Duration, - schedulingConfig configuration.SchedulingConfig, - executorRepository database.ExecutorRepository, -) (*DefaultPoolAssigner, error) { - poolCache, err := lru.New(maxJobSchedulingResults) - if err != nil { - return nil, errors.Wrap(err, "error creating PoolAssigner pool cache") - } +func NewPoolAssigner(executorRepository database.ExecutorRepository) *DefaultPoolAssigner { return &DefaultPoolAssigner{ - executorTimeout: executorTimeout, - priorityClasses: schedulingConfig.PriorityClasses, - executorsByPool: map[string][]*executor{}, - poolByExecutorId: map[string]string{}, - priorities: types.AllowedPriorities(schedulingConfig.PriorityClasses), - indexedResources: schedulingConfig.IndexedResources, - indexedTaints: schedulingConfig.IndexedTaints, - wellKnownNodeTypes: schedulingConfig.WellKnownNodeTypes, - indexedNodeLabels: schedulingConfig.IndexedNodeLabels, - executorRepository: executorRepository, - schedulingKeyGenerator: schedulerobjects.NewSchedulingKeyGenerator(), - poolCache: poolCache, - clock: clock.RealClock{}, - }, nil + executorRepository: executorRepository, + poolByExecutorId: map[string]string{}, + } } // Refresh updates executor state func (p *DefaultPoolAssigner) Refresh(ctx *armadacontext.Context) error { executors, err := p.executorRepository.GetExecutors(ctx) - executorsByPool := map[string][]*executor{} - poolByExecutorId := map[string]string{} if err != nil { return err } + poolByExecutorId := map[string]string{} for _, e := range executors { - if p.clock.Since(e.LastUpdateTime) < p.executorTimeout { - poolByExecutorId[e.Id] = e.Pool - nodeDb, err := p.constructNodeDb(e.Nodes) - if err != nil { - return errors.WithMessagef(err, "could not construct node db for executor %s", e.Id) - } - executorsByPool[e.Pool] = append(executorsByPool[e.Pool], &executor{ - nodeDb: nodeDb, - minimumJobSize: e.MinimumJobSize, - }) - } + poolByExecutorId[e.Id] = e.Pool } - p.executorsByPool = executorsByPool p.poolByExecutorId = poolByExecutorId - p.schedulingKeyGenerator = schedulerobjects.NewSchedulingKeyGenerator() - p.poolCache.Purge() return nil } -// AssignPool returns the pool associated with the job or the empty string if no pool is valid -func (p *DefaultPoolAssigner) AssignPool(j *jobdb.Job) (string, error) { - // If Job is running then use the pool associated with the executor it was assigned to +// AssignPools returns the pools associated with the job or the empty string if no pool is valid +func (p *DefaultPoolAssigner) AssignPools(j *jobdb.Job) ([]string, error) { + // If Job has an active run then use the 
pool associated with the executor it was assigned to if !j.Queued() && j.HasRuns() { - return p.poolByExecutorId[j.LatestRun().Executor()], nil - } - - // See if we have this set of reqs cached. - schedulingKey, ok := j.GetSchedulingKey() - if !ok { - schedulingKey = interfaces.SchedulingKeyFromLegacySchedulerJob(p.schedulingKeyGenerator, j) - } - if cachedPool, ok := p.poolCache.Get(schedulingKey); ok { - return cachedPool.(string), nil - } - - req := j.PodRequirements() - req = p.clearAnnotations(req) - - // Otherwise iterate through each pool and detect the first one the job is potentially schedulable on. - // TODO: We should use the real scheduler instead since this check may go out of sync with the scheduler. - for pool, executors := range p.executorsByPool { - for _, e := range executors { - requests := req.GetResourceRequirements().Requests - if ok, _ := constraints.RequestsAreLargeEnough(schedulerobjects.ResourceListFromV1ResourceList(requests), e.minimumJobSize); !ok { - continue - } - nodeDb := e.nodeDb - txn := nodeDb.Txn(true) - jctx := &schedulercontext.JobSchedulingContext{ - Created: time.Now(), - JobId: j.GetId(), - Job: j, - PodRequirements: j.GetPodRequirements(p.priorityClasses), - GangInfo: schedulercontext.EmptyGangInfo(j), - } - node, err := nodeDb.SelectNodeForJobWithTxn(txn, jctx) - txn.Abort() - if err != nil { - return "", errors.WithMessagef(err, "error selecting node for job %s", j.Id()) - } - if node != nil { - p.poolCache.Add(schedulingKey, pool) - return pool, nil - } - } - } - return "", nil -} - -func (p *DefaultPoolAssigner) constructNodeDb(nodes []*schedulerobjects.Node) (*nodedb.NodeDb, error) { - // Nodes to be considered by the scheduler. - nodeDb, err := nodedb.NewNodeDb( - p.priorityClasses, - 0, - p.indexedResources, - p.indexedTaints, - p.indexedNodeLabels, - p.wellKnownNodeTypes, - stringinterner.New(1024), - ) - if err != nil { - return nil, err - } - txn := nodeDb.Txn(true) - defer txn.Abort() - for _, node := range nodes { - if err := nodeDb.CreateAndInsertWithJobDbJobsWithTxn(txn, nil, node); err != nil { - return nil, err - } - } - txn.Commit() - err = nodeDb.ClearAllocated() - if err != nil { - return nil, err - } - return nodeDb, nil -} - -// clearAnnotations -func (p *DefaultPoolAssigner) clearAnnotations(reqs *schedulerobjects.PodRequirements) *schedulerobjects.PodRequirements { - reqsCopy := proto.Clone(reqs).(*schedulerobjects.PodRequirements) - for key := range reqsCopy.GetAnnotations() { - reqsCopy.Annotations[key] = "poolassigner" + pool := p.poolByExecutorId[j.LatestRun().Executor()] + return []string{pool}, nil } - return reqsCopy + // otherwise use the pools associated with the job + return j.Pools(), nil } diff --git a/internal/scheduler/pool_assigner_test.go b/internal/scheduler/pool_assigner_test.go index faf37636542..a2010a0a92f 100644 --- a/internal/scheduler/pool_assigner_test.go +++ b/internal/scheduler/pool_assigner_test.go @@ -7,42 +7,44 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/clock" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" - "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/jobdb" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" 
"github.com/armadaproject/armada/internal/scheduler/testfixtures" ) -func TestPoolAssigner_AssignPool(t *testing.T) { - executorTimeout := 15 * time.Minute - cpuJob := testfixtures.TestQueuedJobDbJob() - gpuJob := testfixtures.WithJobDbJobPodRequirements(testfixtures.TestQueuedJobDbJob(), testfixtures.Test1GpuPodReqs(testfixtures.TestQueue, util.ULID(), testfixtures.TestPriorities[0])) +func TestPoolAssigner_AssignPools(t *testing.T) { + queuedJob := testfixtures.TestQueuedJobDbJob() + cpuExecutor := testfixtures.TestExecutor(testfixtures.BaseTime) + runningJob := queuedJob. + WithQueued(false). + WithNewRun(cpuExecutor.Id, "testNode", "testNode", 0) tests := map[string]struct { executorTimout time.Duration - config configuration.SchedulingConfig executors []*schedulerobjects.Executor job *jobdb.Job - expectedPool string + expectedPools []string }{ - "matches pool": { - executorTimout: executorTimeout, - config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - job: cpuJob, - expectedPool: "cpu", + "queued job with single pool": { + job: queuedJob.WithPools([]string{"cpu"}), + expectedPools: []string{"cpu"}, }, - "doesn't match pool": { - executorTimout: executorTimeout, - config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - job: gpuJob, - expectedPool: "", + "queued job with multiple pools": { + job: queuedJob.WithPools([]string{"cpu", "gpu"}), + expectedPools: []string{"cpu", "gpu"}, + }, + "running job matches pool": { + executors: []*schedulerobjects.Executor{cpuExecutor}, + job: runningJob, + expectedPools: []string{"cpu"}, + }, + "running job doesn't match pool": { + executors: []*schedulerobjects.Executor{}, + job: runningJob, + expectedPools: []string{""}, }, } for name, tc := range tests { @@ -53,17 +55,14 @@ func TestPoolAssigner_AssignPool(t *testing.T) { ctrl := gomock.NewController(t) mockExecutorRepo := schedulermocks.NewMockExecutorRepository(ctrl) mockExecutorRepo.EXPECT().GetExecutors(ctx).Return(tc.executors, nil).AnyTimes() - fakeClock := clock.NewFakeClock(testfixtures.BaseTime) - assigner, err := NewPoolAssigner(tc.executorTimout, tc.config, mockExecutorRepo) - require.NoError(t, err) - assigner.clock = fakeClock + assigner := NewPoolAssigner(mockExecutorRepo) - err = assigner.Refresh(ctx) + err := assigner.Refresh(ctx) require.NoError(t, err) - pool, err := assigner.AssignPool(tc.job) + pools, err := assigner.AssignPools(tc.job) require.NoError(t, err) - assert.Equal(t, tc.expectedPool, pool) + assert.Equal(t, tc.expectedPools, pools) }) } } diff --git a/internal/scheduler/preempting_queue_scheduler.go b/internal/scheduler/preempting_queue_scheduler.go index dacf70dc147..5ba4a809bb1 100644 --- a/internal/scheduler/preempting_queue_scheduler.go +++ b/internal/scheduler/preempting_queue_scheduler.go @@ -1,7 +1,6 @@ package scheduler import ( - "math/rand" "reflect" "time" @@ -9,7 +8,6 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/maps" "golang.org/x/exp/slices" - "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/common/armadacontext" armadamaps "github.com/armadaproject/armada/internal/common/maps" @@ -19,7 +17,6 @@ import ( schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/fairness" - 
"github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" @@ -29,13 +26,11 @@ import ( // PreemptingQueueScheduler is a scheduler that makes a unified decisions on which jobs to preempt and schedule. // Uses QueueScheduler as a building block. type PreemptingQueueScheduler struct { - schedulingContext *schedulercontext.SchedulingContext - constraints schedulerconstraints.SchedulingConstraints - nodeEvictionProbability float64 - nodeOversubscriptionEvictionProbability float64 - protectedFractionOfFairShare float64 - jobRepo JobRepository - nodeDb *nodedb.NodeDb + schedulingContext *schedulercontext.SchedulingContext + constraints schedulerconstraints.SchedulingConstraints + protectedFractionOfFairShare float64 + jobRepo JobRepository + nodeDb *nodedb.NodeDb // Maps job ids to the id of the node the job is associated with. // For scheduled or running jobs, that is the node the job is assigned to. // For preempted jobs, that is the node the job was preempted from. @@ -53,8 +48,6 @@ type PreemptingQueueScheduler struct { func NewPreemptingQueueScheduler( sctx *schedulercontext.SchedulingContext, constraints schedulerconstraints.SchedulingConstraints, - nodeEvictionProbability float64, - nodeOversubscriptionEvictionProbability float64, protectedFractionOfFairShare float64, jobRepo JobRepository, nodeDb *nodedb.NodeDb, @@ -76,16 +69,14 @@ func NewPreemptingQueueScheduler( initialJobIdsByGangId[gangId] = maps.Clone(jobIds) } return &PreemptingQueueScheduler{ - schedulingContext: sctx, - constraints: constraints, - nodeEvictionProbability: nodeEvictionProbability, - nodeOversubscriptionEvictionProbability: nodeOversubscriptionEvictionProbability, - protectedFractionOfFairShare: protectedFractionOfFairShare, - jobRepo: jobRepo, - nodeDb: nodeDb, - nodeIdByJobId: maps.Clone(initialNodeIdByJobId), - jobIdsByGangId: initialJobIdsByGangId, - gangIdByJobId: maps.Clone(initialGangIdByJobId), + schedulingContext: sctx, + constraints: constraints, + protectedFractionOfFairShare: protectedFractionOfFairShare, + jobRepo: jobRepo, + nodeDb: nodeDb, + nodeIdByJobId: maps.Clone(initialNodeIdByJobId), + jobIdsByGangId: initialJobIdsByGangId, + gangIdByJobId: maps.Clone(initialGangIdByJobId), } } @@ -115,27 +106,27 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche // Evict preemptible jobs. 
totalCost := sch.schedulingContext.TotalCost() + ctx.WithField("stage", "scheduling-algo").Infof("Evicting preemptible jobs") evictorResult, inMemoryJobRepo, err := sch.evict( armadacontext.WithLogField(ctx, "stage", "evict for resource balancing"), NewNodeEvictor( sch.jobRepo, sch.nodeDb, sch.schedulingContext.PriorityClasses, - sch.nodeEvictionProbability, - func(ctx *armadacontext.Context, job interfaces.LegacySchedulerJob) bool { - priorityClass := interfaces.PriorityClassFromLegacySchedulerJob(sch.schedulingContext.PriorityClasses, sch.schedulingContext.DefaultPriorityClass, job) + func(ctx *armadacontext.Context, job *jobdb.Job) bool { + priorityClass := job.PriorityClass() if !priorityClass.Preemptible { return false } - if job.GetAnnotations() == nil { - ctx.Errorf("can't evict job %s: annotations not initialised", job.GetId()) + if job.Annotations() == nil { + ctx.Errorf("can't evict job %s: annotations not initialised", job.Id()) return false } - if job.GetNodeSelector() == nil { - ctx.Errorf("can't evict job %s: nodeSelector not initialised", job.GetId()) + if job.NodeSelector() == nil { + ctx.Errorf("can't evict job %s: nodeSelector not initialised", job.Id()) return false } - if qctx, ok := sch.schedulingContext.QueueSchedulingContexts[job.GetQueue()]; ok { + if qctx, ok := sch.schedulingContext.QueueSchedulingContexts[job.Queue()]; ok { fairShare := qctx.Weight / sch.schedulingContext.WeightSum actualShare := sch.schedulingContext.FairnessCostProvider.CostFromQueue(qctx) / totalCost fractionOfFairShare := actualShare / fairShare @@ -145,18 +136,19 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche } return true }, - nil, ), ) if err != nil { return nil, err } + ctx.WithField("stage", "scheduling-algo").Info("Finished evicting preemptible jobs") for _, jctx := range evictorResult.EvictedJctxsByJobId { preemptedJobsById[jctx.JobId] = jctx } maps.Copy(sch.nodeIdByJobId, evictorResult.NodeIdByJobId) // Re-schedule evicted jobs/schedule new jobs. + ctx.WithField("stage", "scheduling-algo").Info("Performing initial scheduling of jobs onto nodes") schedulerResult, err := sch.schedule( armadacontext.WithLogField(ctx, "stage", "re-schedule after balancing eviction"), inMemoryJobRepo, @@ -165,6 +157,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche if err != nil { return nil, err } + ctx.WithField("stage", "scheduling-algo").Info("Finished initial scheduling of jobs onto nodes") for _, jctx := range schedulerResult.ScheduledJobs { if _, ok := preemptedJobsById[jctx.JobId]; ok { delete(preemptedJobsById, jctx.JobId) @@ -176,20 +169,19 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche maps.Copy(additionalAnnotationsByJobId, schedulerResult.AdditionalAnnotationsByJobId) // Evict jobs on oversubscribed nodes.
+ ctx.WithField("stage", "scheduling-algo").Info("Evicting jobs from oversubscribed nodes") evictorResult, inMemoryJobRepo, err = sch.evict( armadacontext.WithLogField(ctx, "stage", "evict oversubscribed"), NewOversubscribedEvictor( sch.jobRepo, sch.nodeDb, sch.schedulingContext.PriorityClasses, - sch.schedulingContext.DefaultPriorityClass, - sch.nodeOversubscriptionEvictionProbability, - nil, ), ) if err != nil { return nil, err } + ctx.WithField("stage", "scheduling-algo").Info("Finished evicting jobs from oversubscribed nodes") scheduledAndEvictedJobsById := armadamaps.FilterKeys( scheduledJobsById, func(jobId string) bool { @@ -211,6 +203,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche if len(evictorResult.EvictedJctxsByJobId) > 0 { // Since no new jobs are considered in this round, the scheduling key check brings no benefit. sch.SkipUnsuccessfulSchedulingKeyCheck() + ctx.WithField("stage", "scheduling-algo").Info("Performing second scheduling ") schedulerResult, err = sch.schedule( armadacontext.WithLogField(ctx, "stage", "schedule after oversubscribed eviction"), inMemoryJobRepo, @@ -220,6 +213,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche if err != nil { return nil, err } + ctx.WithField("stage", "scheduling-algo").Info("Finished second scheduling pass") for _, jctx := range schedulerResult.ScheduledJobs { if _, ok := preemptedJobsById[jctx.JobId]; ok { delete(preemptedJobsById, jctx.JobId) @@ -234,20 +228,21 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche preemptedJobs := maps.Values(preemptedJobsById) scheduledJobs := maps.Values(scheduledJobsById) + ctx.WithField("stage", "scheduling-algo").Infof("Unbinding %d preempted and %d evicted jobs", len(preemptedJobs), len(maps.Values(scheduledAndEvictedJobsById))) if err := sch.unbindJobs(append( slices.Clone(preemptedJobs), maps.Values(scheduledAndEvictedJobsById)...), ); err != nil { return nil, err } - if s := JobsSummary(preemptedJobs); s != "" { - ctx.Infof("preempting running jobs; %s", s) - } - if s := JobsSummary(scheduledJobs); s != "" { - ctx.Infof("scheduling new jobs; %s", s) - } + ctx.WithField("stage", "scheduling-algo").Infof("Finished unbinding preempted and evicted jobs") + + PrintJobSummary(ctx, "Preempting running jobs;", preemptedJobs) + PrintJobSummary(ctx, "Scheduling new jobs;", scheduledJobs) // TODO: Show failed jobs. + if sch.enableAssertions { + ctx.WithField("stage", "scheduling-algo").Infof("Performing assertions after scheduling round") err := sch.assertions( snapshot, preemptedJobsById, @@ -257,11 +252,11 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche if err != nil { return nil, err } + ctx.WithField("stage", "scheduling-algo").Infof("Finished running assertions after scheduling round") } return &SchedulerResult{ PreemptedJobs: preemptedJobs, ScheduledJobs: scheduledJobs, - FailedJobs: schedulerResult.FailedJobs, NodeIdByJobId: sch.nodeIdByJobId, AdditionalAnnotationsByJobId: additionalAnnotationsByJobId, SchedulingContexts: []*schedulercontext.SchedulingContext{sch.schedulingContext}, @@ -401,15 +396,14 @@ func (sch *PreemptingQueueScheduler) collectIdsForGangEviction(evictorResult *Ev // Otherwise, the evicted gang jobs will not be schedulable, since some gang jobs will be considered missing. 
func (sch *PreemptingQueueScheduler) setEvictedGangCardinality(evictorResult *EvictorResult) { for _, jctx := range evictorResult.EvictedJctxsByJobId { - gangId, ok := sch.gangIdByJobId[jctx.Job.GetId()] + gangId, ok := sch.gangIdByJobId[jctx.Job.Id()] if !ok { // Not a gang job. continue } - // Override cardinality and min cardinality with the number of evicted jobs in this gang. + // Override cardinality with the number of evicted jobs in this gang. jctx.GangInfo.Cardinality = len(sch.jobIdsByGangId[gangId]) - jctx.GangInfo.MinimumCardinality = jctx.GangInfo.Cardinality } return } @@ -483,7 +477,7 @@ func (q MinimalQueue) GetWeight() float64 { // addEvictedJobsToNodeDb adds evicted jobs to the NodeDb. // Needed to enable the nodeDb accounting for these when preempting. -func addEvictedJobsToNodeDb(ctx *armadacontext.Context, sctx *schedulercontext.SchedulingContext, nodeDb *nodedb.NodeDb, inMemoryJobRepo *InMemoryJobRepository) error { +func addEvictedJobsToNodeDb(_ *armadacontext.Context, sctx *schedulercontext.SchedulingContext, nodeDb *nodedb.NodeDb, inMemoryJobRepo *InMemoryJobRepository) error { gangItByQueue := make(map[string]*QueuedGangIterator) for _, qctx := range sctx.QueueSchedulingContexts { gangItByQueue[qctx.Queue] = NewQueuedGangIterator( @@ -531,10 +525,7 @@ func (sch *PreemptingQueueScheduler) schedule(ctx *armadacontext.Context, inMemo if jobRepo == nil || reflect.ValueOf(jobRepo).IsNil() { jobIteratorByQueue[qctx.Queue] = evictedIt } else { - queueIt, err := NewQueuedJobsIterator(ctx, qctx.Queue, jobRepo, sch.schedulingContext.PriorityClasses) - if err != nil { - return nil, err - } + queueIt := NewQueuedJobsIterator(ctx, qctx.Queue, jobRepo, sch.schedulingContext.PriorityClasses) jobIteratorByQueue[qctx.Queue] = NewMultiJobsIterator(evictedIt, queueIt) } } @@ -574,7 +565,7 @@ func (sch *PreemptingQueueScheduler) unbindJobs(jctxs []*schedulercontext.JobSch func(jctx *schedulercontext.JobSchedulingContext) string { return sch.nodeIdByJobId[jctx.JobId] }, - func(jcxt *schedulercontext.JobSchedulingContext) interfaces.LegacySchedulerJob { + func(jcxt *schedulercontext.JobSchedulingContext) *jobdb.Job { return jcxt.Job }, ) { @@ -596,8 +587,8 @@ func (sch *PreemptingQueueScheduler) unbindJobs(jctxs []*schedulercontext.JobSch // Update sch.gangIdByJobId and sch.jobIdsByGangId based on preempted/scheduled jobs. 
func (sch *PreemptingQueueScheduler) updateGangAccounting(preempted []*schedulercontext.JobSchedulingContext, scheduled []*schedulercontext.JobSchedulingContext) error { for _, jctx := range preempted { - if gangId, ok := sch.gangIdByJobId[jctx.Job.GetId()]; ok { - delete(sch.gangIdByJobId, jctx.Job.GetId()) + if gangId, ok := sch.gangIdByJobId[jctx.Job.Id()]; ok { + delete(sch.gangIdByJobId, jctx.Job.Id()) delete(sch.jobIdsByGangId, gangId) } } @@ -691,7 +682,7 @@ type Evictor struct { nodeDb *nodedb.NodeDb priorityClasses map[string]types.PriorityClass nodeFilter func(*armadacontext.Context, *internaltypes.Node) bool - jobFilter func(*armadacontext.Context, interfaces.LegacySchedulerJob) bool + jobFilter func(*armadacontext.Context, *jobdb.Job) bool } type EvictorResult struct { @@ -707,22 +698,14 @@ func NewNodeEvictor( jobRepo JobRepository, nodeDb *nodedb.NodeDb, priorityClasses map[string]types.PriorityClass, - perNodeEvictionProbability float64, - jobFilter func(*armadacontext.Context, interfaces.LegacySchedulerJob) bool, - random *rand.Rand, + jobFilter func(*armadacontext.Context, *jobdb.Job) bool, ) *Evictor { - if perNodeEvictionProbability <= 0 { - return nil - } - if random == nil { - random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - } return &Evictor{ jobRepo: jobRepo, nodeDb: nodeDb, priorityClasses: priorityClasses, nodeFilter: func(_ *armadacontext.Context, node *internaltypes.Node) bool { - return len(node.AllocatedByJobId) > 0 && random.Float64() < perNodeEvictionProbability + return len(node.AllocatedByJobId) > 0 }, jobFilter: jobFilter, } @@ -748,8 +731,8 @@ func NewFilteredEvictor( shouldEvict := nodeIdsToEvict[node.GetId()] return shouldEvict }, - jobFilter: func(_ *armadacontext.Context, job interfaces.LegacySchedulerJob) bool { - shouldEvict := jobIdsToEvict[job.GetId()] + jobFilter: func(_ *armadacontext.Context, job *jobdb.Job) bool { + shouldEvict := jobIdsToEvict[job.Id()] return shouldEvict }, } @@ -757,21 +740,11 @@ func NewFilteredEvictor( // NewOversubscribedEvictor returns a new evictor that // for each node evicts all preemptible jobs of a priority class for which at least one job could not be scheduled -// with probability perNodeEvictionProbability. func NewOversubscribedEvictor( jobRepo JobRepository, nodeDb *nodedb.NodeDb, priorityClasses map[string]types.PriorityClass, - defaultPriorityClassName string, - perNodeEvictionProbability float64, - random *rand.Rand, ) *Evictor { - if perNodeEvictionProbability <= 0 { - return nil - } - if random == nil { - random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - } // Populating overSubscribedPriorities relies on // - nodeFilter being called once before all calls to jobFilter and // - jobFilter being called for all jobs on that node before moving on to another node. @@ -787,23 +760,20 @@ func NewOversubscribedEvictor( // Negative priorities correspond to already evicted jobs. 
continue } - for _, q := range rl.Resources { - if q.Cmp(resource.Quantity{}) == -1 { - overSubscribedPriorities[p] = true - break - } + if rl.HasNegativeValues() { + overSubscribedPriorities[p] = true } } - return len(overSubscribedPriorities) > 0 && random.Float64() < perNodeEvictionProbability + return len(overSubscribedPriorities) > 0 }, - jobFilter: func(ctx *armadacontext.Context, job interfaces.LegacySchedulerJob) bool { - priorityClass := interfaces.PriorityClassFromLegacySchedulerJob(priorityClasses, defaultPriorityClassName, job) + jobFilter: func(ctx *armadacontext.Context, job *jobdb.Job) bool { + priorityClass := job.PriorityClass() if !priorityClass.Preemptible { return false } - priority, ok := nodeDb.GetScheduledAtPriority(job.GetId()) + priority, ok := nodeDb.GetScheduledAtPriority(job.Id()) if !ok { - ctx.Warnf("can't evict job %s: not mapped to a priority", job.GetId()) + ctx.Warnf("can't evict job %s: not mapped to a priority", job.Id()) return false } return overSubscribedPriorities[priority] @@ -816,9 +786,9 @@ func NewOversubscribedEvictor( // Any job for which jobFilter returns true is evicted (if the node was not skipped). // If a job was evicted from a node, postEvictFunc is called with the corresponding job and node. func (evi *Evictor) Evict(ctx *armadacontext.Context, nodeDbTxn *memdb.Txn) (*EvictorResult, error) { - var jobFilter func(job interfaces.LegacySchedulerJob) bool + var jobFilter func(job *jobdb.Job) bool if evi.jobFilter != nil { - jobFilter = func(job interfaces.LegacySchedulerJob) bool { return evi.jobFilter(ctx, job) } + jobFilter = func(job *jobdb.Job) bool { return evi.jobFilter(ctx, job) } } evictedJctxsByJobId := make(map[string]*schedulercontext.JobSchedulingContext) affectedNodesById := make(map[string]*internaltypes.Node) @@ -839,10 +809,7 @@ func (evi *Evictor) Evict(ctx *armadacontext.Context, nodeDbTxn *memdb.Txn) (*Ev jobIds = append(jobIds, jobId) } } - jobs, err := evi.jobRepo.GetExistingJobsByIds(jobIds) - if err != nil { - return nil, err - } + jobs := evi.jobRepo.GetExistingJobsByIds(jobIds) evictedJobs, node, err := evi.nodeDb.EvictJobsFromNode(evi.priorityClasses, jobFilter, jobs, node) if err != nil { return nil, err @@ -850,9 +817,7 @@ func (evi *Evictor) Evict(ctx *armadacontext.Context, nodeDbTxn *memdb.Txn) (*Ev // TODO: Should be safe to remove now. for i, evictedJob := range evictedJobs { - if dbJob, ok := evictedJob.(*jobdb.Job); ok { - evictedJobs[i] = dbJob.DeepCopy() - } + evictedJobs[i] = evictedJob.DeepCopy() } for _, job := range evictedJobs { @@ -863,13 +828,13 @@ func (evi *Evictor) Evict(ctx *armadacontext.Context, nodeDbTxn *memdb.Txn) (*Ev // - Adding taints to a node doesn't cause jobs already running on the node to be preempted. // - Jobs scheduled as away jobs have the necessary tolerations to be re-scheduled. // TODO(albin): We can remove the checkOnlyDynamicRequirements flag in the nodeDb now that we've added the tolerations. - jctx := schedulercontext.JobSchedulingContextFromJob(evi.priorityClasses, job) + jctx := schedulercontext.JobSchedulingContextFromJob(job) jctx.IsEvicted = true jctx.AddNodeSelector(schedulerconfig.NodeIdLabel, node.GetId()) - evictedJctxsByJobId[job.GetId()] = jctx + evictedJctxsByJobId[job.Id()] = jctx jctx.AdditionalTolerations = append(jctx.AdditionalTolerations, node.GetTolerationsForTaints()...) 
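// Pinning each evicted job to its original node via the NodeIdLabel selector above means that
// re-scheduling can only place it back where it previously ran; evicted jobs that are not
// re-scheduled in a later pass remain in preemptedJobsById and are reported as preempted.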
- nodeIdByJobId[job.GetId()] = node.GetId() + nodeIdByJobId[job.Id()] = node.GetId() } if len(evictedJobs) > 0 { affectedNodesById[node.GetId()] = node diff --git a/internal/scheduler/preempting_queue_scheduler_test.go b/internal/scheduler/preempting_queue_scheduler_test.go index 69ce43cc86a..93000154e43 100644 --- a/internal/scheduler/preempting_queue_scheduler_test.go +++ b/internal/scheduler/preempting_queue_scheduler_test.go @@ -14,7 +14,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" armadamaps "github.com/armadaproject/armada/internal/common/maps" @@ -22,6 +21,7 @@ import ( "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/fairness" @@ -51,7 +51,7 @@ func TestEvictOversubscribed(t *testing.T) { err = nodeDb.CreateAndInsertWithJobDbJobsWithTxn(nodeDbTxn, jobs, node) require.NoError(t, err) - jobDb := jobdb.NewJobDb(config.PriorityClasses, config.DefaultPriorityClassName, stringInterner) + jobDb := jobdb.NewJobDb(config.PriorityClasses, config.DefaultPriorityClassName, stringInterner, testfixtures.TestResourceListFactory) jobDbTxn := jobDb.WriteTxn() err = jobDbTxn.Upsert(jobs) require.NoError(t, err) @@ -59,18 +59,14 @@ func TestEvictOversubscribed(t *testing.T) { evictor := NewOversubscribedEvictor( NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, - config.PriorityClasses, - config.DefaultPriorityClassName, - 1, - nil, - ) + config.PriorityClasses) result, err := evictor.Evict(armadacontext.Background(), nodeDbTxn) require.NoError(t, err) for nodeId, node := range result.AffectedNodesById { for _, p := range priorities { - for resourceType, q := range node.AllocatableByPriority[p].Resources { - assert.NotEqual(t, -1, q.Cmp(resource.Quantity{}), "resource %s oversubscribed by %s on node %s", resourceType, q.String(), nodeId) + for _, r := range node.AllocatableByPriority[p].GetResources() { + assert.True(t, r.Value >= 0, "resource %s oversubscribed by %d on node %s", r.Name, r.Value, nodeId) } } } @@ -573,83 +569,10 @@ func TestPreemptingQueueScheduler(t *testing.T) { "C": 1, }, }, - "gang preemption with partial gang": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - Rounds: []SchedulingRound{ - { - // Schedule a gang across two nodes. - JobsByQueue: map[string][]*jobdb.Job{ - "A": testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 1, - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2), - ), - }, - ExpectedScheduledIndices: map[string][]int{ - "A": testfixtures.IntRange(0, 1), - }, - }, - { - // Unbind one of the jobs in the gang (simulating that job terminating) - // and test that the remaining job isn't preempted. 
- IndicesToUnbind: map[string]map[int][]int{ - "A": { - 0: testfixtures.IntRange(0, 0), - }, - }, - }, - }, - PriorityFactorByQueue: map[string]float64{ - "A": 1, - }, - }, - "gang preemption with NodeEvictionProbability 0": { - SchedulingConfig: testfixtures.WithNodeEvictionProbabilityConfig( - 0.0, // To test the gang evictor, we need to disable stochastic eviction. - testfixtures.TestSchedulingConfig(), - ), - Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - Rounds: []SchedulingRound{ - { - // Schedule a gang filling all of node 1 and part of node 2. - // Make the jobs of node 1 priority 1, - // to avoid them being urgency-preempted in the next round. - JobsByQueue: map[string][]*jobdb.Job{ - "A": testfixtures.WithGangAnnotationsJobs( - append(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass1, 32), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)...), - ), - }, - ExpectedScheduledIndices: map[string][]int{ - "A": testfixtures.IntRange(0, 32), - }, - }, - { - // Schedule a that requires preempting one job in the gang, - // and assert that all jobs in the gang are preempted. - JobsByQueue: map[string][]*jobdb.Job{ - "B": testfixtures.N32Cpu256GiJobs("B", testfixtures.PriorityClass1, 1), - }, - ExpectedScheduledIndices: map[string][]int{ - "B": testfixtures.IntRange(0, 0), - }, - ExpectedPreemptedIndices: map[string]map[int][]int{ - "A": { - 0: testfixtures.IntRange(0, 32), - }, - }, - }, - }, - PriorityFactorByQueue: map[string]float64{ - "A": 1, - "B": 1, - }, - }, + "gang preemption avoid cascading preemption": { - SchedulingConfig: testfixtures.WithNodeEvictionProbabilityConfig( - 0.0, // To test the gang evictor, we need to disable stochastic eviction. - testfixtures.TestSchedulingConfig(), - ), - Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), Rounds: []SchedulingRound{ { // Schedule a gang spanning nodes 1 and 2. 
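The gang preemption cases in this test table exercise the rule that preempting any member of a gang preempts every member. A small, self-contained sketch of that whole-gang expansion step; the two maps mirror the test's `gangIdByJobId` and `jobIdsByGangId` bookkeeping, everything else is purely illustrative:

```go
package main

import "fmt"

// expandToGangs grows a set of preempted job ids so that if any member of a
// gang is preempted, every member of that gang is preempted too.
func expandToGangs(
	preempted map[string]bool,
	gangIdByJobId map[string]string,
	jobIdsByGangId map[string]map[string]bool,
) map[string]bool {
	out := map[string]bool{}
	for jobId := range preempted {
		out[jobId] = true
		if gangId, ok := gangIdByJobId[jobId]; ok {
			for member := range jobIdsByGangId[gangId] {
				out[member] = true
			}
		}
	}
	return out
}

func main() {
	gangIdByJobId := map[string]string{"j1": "g1", "j2": "g1"}
	jobIdsByGangId := map[string]map[string]bool{"g1": {"j1": true, "j2": true}}
	preempted := map[string]bool{"j1": true, "j3": true} // j3 is not in a gang
	fmt.Println(expandToGangs(preempted, gangIdByJobId, jobIdsByGangId)) // j1, j2 and j3
}
```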
@@ -1170,11 +1093,8 @@ func TestPreemptingQueueScheduler(t *testing.T) { }, }, "Oversubscribed eviction does not evict non-preemptible": { - SchedulingConfig: testfixtures.WithNodeEvictionProbabilityConfig( - 0.0, - testfixtures.TestSchedulingConfig(), - ), - Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Rounds: []SchedulingRound{ { JobsByQueue: map[string][]*jobdb.Job{ @@ -1485,18 +1405,6 @@ func TestPreemptingQueueScheduler(t *testing.T) { "home-away preemption, away jobs first": { SchedulingConfig: func() configuration.SchedulingConfig { config := testfixtures.TestSchedulingConfig() - config.PriorityClasses = map[string]types.PriorityClass{ - "armada-preemptible-away": { - Priority: 30000, - Preemptible: true, - - AwayNodeTypes: []types.AwayNodeType{{Priority: 29000, WellKnownNodeTypeName: "gpu"}}, - }, - "armada-preemptible": { - Priority: 30000, - Preemptible: true, - }, - } config.DefaultPriorityClassName = "armada-preemptible" config.WellKnownNodeTypes = []configuration.WellKnownNodeType{ { @@ -1762,7 +1670,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { priorities := types.AllowedPriorities(tc.SchedulingConfig.PriorityClasses) - jobDb := jobdb.NewJobDb(tc.SchedulingConfig.PriorityClasses, tc.SchedulingConfig.DefaultPriorityClassName, stringinterner.New(1024)) + jobDb := jobdb.NewJobDb(tc.SchedulingConfig.PriorityClasses, tc.SchedulingConfig.DefaultPriorityClassName, stringinterner.New(1024), testfixtures.TestResourceListFactory) jobDbTxn := jobDb.WriteTxn() // Accounting across scheduling rounds. @@ -1800,10 +1708,10 @@ func TestPreemptingQueueScheduler(t *testing.T) { for queue, jobs := range round.JobsByQueue { for j, job := range jobs { job = job.WithQueued(true) - require.Equal(t, queue, job.GetQueue()) + require.Equal(t, queue, job.Queue()) queuedJobs = append(queuedJobs, job.WithQueued(true)) - roundByJobId[job.GetId()] = i - indexByJobId[job.GetId()] = j + roundByJobId[job.Id()] = i + indexByJobId[job.Id()] = j } } err = jobDbTxn.Upsert(queuedJobs) @@ -1814,16 +1722,16 @@ func TestPreemptingQueueScheduler(t *testing.T) { for roundIndex, reqIndices := range reqIndicesByRoundIndex { for _, reqIndex := range reqIndices { job := tc.Rounds[roundIndex].JobsByQueue[queue][reqIndex] - nodeId := nodeIdByJobId[job.GetId()] + nodeId := nodeIdByJobId[job.Id()] node, err := nodeDb.GetNode(nodeId) require.NoError(t, err) node, err = nodeDb.UnbindJobFromNode(tc.SchedulingConfig.PriorityClasses, job, node) require.NoError(t, err) err = nodeDb.Upsert(node) require.NoError(t, err) - if gangId, ok := gangIdByJobId[job.GetId()]; ok { - delete(gangIdByJobId, job.GetId()) - delete(jobIdsByGangId[gangId], job.GetId()) + if gangId, ok := gangIdByJobId[job.Id()]; ok { + delete(gangIdByJobId, job.Id()) + delete(jobIdsByGangId[gangId], job.Id()) } } } @@ -1880,8 +1788,6 @@ func TestPreemptingQueueScheduler(t *testing.T) { sch := NewPreemptingQueueScheduler( sctx, constraints, - tc.SchedulingConfig.NodeEvictionProbability, - tc.SchedulingConfig.NodeOversubscriptionEvictionProbability, tc.SchedulingConfig.ProtectedFractionOfFairShare, NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, @@ -1899,26 +1805,26 @@ func TestPreemptingQueueScheduler(t *testing.T) { // Test resource accounting. 
for _, jctx := range result.PreemptedJobs { job := jctx.Job - m := allocatedByQueueAndPriorityClass[job.GetQueue()] + m := allocatedByQueueAndPriorityClass[job.Queue()] if m == nil { m = make(schedulerobjects.QuantityByTAndResourceType[string]) - allocatedByQueueAndPriorityClass[job.GetQueue()] = m + allocatedByQueueAndPriorityClass[job.Queue()] = m } m.SubV1ResourceList( - job.GetPriorityClassName(), - job.GetResourceRequirements().Requests, + job.PriorityClassName(), + job.ResourceRequirements().Requests, ) } for _, jctx := range result.ScheduledJobs { job := jctx.Job - m := allocatedByQueueAndPriorityClass[job.GetQueue()] + m := allocatedByQueueAndPriorityClass[job.Queue()] if m == nil { m = make(schedulerobjects.QuantityByTAndResourceType[string]) - allocatedByQueueAndPriorityClass[job.GetQueue()] = m + allocatedByQueueAndPriorityClass[job.Queue()] = m } m.AddV1ResourceList( - job.GetPriorityClassName(), - job.GetResourceRequirements().Requests, + job.PriorityClassName(), + job.ResourceRequirements().Requests, ) } for queue, qctx := range sctx.QueueSchedulingContexts { @@ -1928,17 +1834,17 @@ func TestPreemptingQueueScheduler(t *testing.T) { // Test that jobs are mapped to nodes correctly. for _, jctx := range result.PreemptedJobs { job := jctx.Job - nodeId, ok := result.NodeIdByJobId[job.GetId()] + nodeId, ok := result.NodeIdByJobId[job.Id()] assert.True(t, ok) assert.NotEmpty(t, nodeId) // Check that preempted jobs are preempted from the node they were previously scheduled onto. - expectedNodeId := nodeIdByJobId[job.GetId()] - assert.Equal(t, expectedNodeId, nodeId, "job %s preempted from unexpected node", job.GetId()) + expectedNodeId := nodeIdByJobId[job.Id()] + assert.Equal(t, expectedNodeId, nodeId, "job %s preempted from unexpected node", job.Id()) } for _, jctx := range result.ScheduledJobs { job := jctx.Job - nodeId, ok := result.NodeIdByJobId[job.GetId()] + nodeId, ok := result.NodeIdByJobId[job.Id()] assert.True(t, ok) assert.NotEmpty(t, nodeId) @@ -1954,10 +1860,10 @@ func TestPreemptingQueueScheduler(t *testing.T) { // Check that scheduled jobs are consistently assigned to the same node. // (We don't allow moving jobs between nodes.) 
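The accounting above keeps a per-queue, per-priority-class total that grows with each scheduled job's requests and shrinks with each preempted job's requests, and is then compared against the queue scheduling contexts. A simplified sketch of that bookkeeping, using integer millicores and bytes in place of `QuantityByTAndResourceType` and `v1.ResourceList`; the queue and priority-class names are illustrative:

```go
package main

import "fmt"

// allocated[queue][priorityClass][resource] is a simplified stand-in for
// schedulerobjects.QuantityByTAndResourceType keyed by queue.
type allocated map[string]map[string]map[string]int64

// add applies a job's requests with sign +1 (scheduled) or -1 (preempted).
func (a allocated) add(queue, priorityClass string, requests map[string]int64, sign int64) {
	if a[queue] == nil {
		a[queue] = map[string]map[string]int64{}
	}
	if a[queue][priorityClass] == nil {
		a[queue][priorityClass] = map[string]int64{}
	}
	for r, q := range requests {
		a[queue][priorityClass][r] += sign * q
	}
}

func main() {
	a := allocated{}
	requests := map[string]int64{"cpu": 1000, "memory": 4 << 30} // 1 CPU, 4 GiB

	// A job from queue "A" is scheduled, then preempted in a later round.
	a.add("A", "armada-default", requests, +1)
	a.add("A", "armada-default", requests, -1)

	fmt.Println(a["A"]["armada-default"]) // map[cpu:0 memory:0]
}
```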
- if expectedNodeId, ok := nodeIdByJobId[job.GetId()]; ok { - assert.Equal(t, expectedNodeId, nodeId, "job %s scheduled onto unexpected node", job.GetId()) + if expectedNodeId, ok := nodeIdByJobId[job.Id()]; ok { + assert.Equal(t, expectedNodeId, nodeId, "job %s scheduled onto unexpected node", job.Id()) } else { - nodeIdByJobId[job.GetId()] = nodeId + nodeIdByJobId[job.Id()] = nodeId } } for jobId, nodeId := range result.NodeIdByJobId { @@ -2009,18 +1915,18 @@ func TestPreemptingQueueScheduler(t *testing.T) { require.NoError(t, err) for node := it.NextNode(); node != nil; node = it.NextNode() { for _, p := range priorities { - for resourceType, q := range node.AllocatableByPriority[p].Resources { - assert.NotEqual(t, -1, q.Cmp(resource.Quantity{}), "resource %s oversubscribed by %s on node %s", resourceType, q.String(), node.GetId()) + for _, r := range node.AllocatableByPriority[p].GetResources() { + assert.True(t, r.Value >= 0, "resource %s oversubscribed by %d on node %s", r.Name, r.Value, node.GetId()) } } } - err = jobDbTxn.BatchDelete(util.Map(queuedJobs, func(job *jobdb.Job) string { return job.GetId() })) + err = jobDbTxn.BatchDelete(armadaslices.Map(queuedJobs, func(job *jobdb.Job) string { return job.Id() })) require.NoError(t, err) var preemptedJobs []*jobdb.Job for _, jctx := range result.PreemptedJobs { - job := jctx.Job.(*jobdb.Job) + job := jctx.Job preemptedJobs = append( preemptedJobs, job. @@ -2038,9 +1944,9 @@ func TestPreemptingQueueScheduler(t *testing.T) { slices.SortFunc( result.ScheduledJobs, func(a, b *schedulercontext.JobSchedulingContext) int { - if a.Job.GetSubmitTime().Before(b.Job.GetSubmitTime()) { + if a.Job.SubmitTime().Before(b.Job.SubmitTime()) { return -1 - } else if b.Job.GetSubmitTime().Before(a.Job.GetSubmitTime()) { + } else if b.Job.SubmitTime().Before(a.Job.SubmitTime()) { return 1 } else { return 0 @@ -2049,8 +1955,8 @@ func TestPreemptingQueueScheduler(t *testing.T) { ) var scheduledJobs []*jobdb.Job for _, jctx := range result.ScheduledJobs { - job := jctx.Job.(*jobdb.Job) - jobId := job.GetId() + job := jctx.Job + jobId := job.Id() node, err := nodeDb.GetNode(result.NodeIdByJobId[jobId]) require.NotNil(t, node) require.NoError(t, err) @@ -2074,7 +1980,7 @@ func jobIdsByQueueFromJobContexts(jctxs []*schedulercontext.JobSchedulingContext rv := make(map[string][]string) for _, jctx := range jctxs { job := jctx.Job - rv[job.GetQueue()] = append(rv[job.GetQueue()], job.GetId()) + rv[job.Queue()] = append(rv[job.Queue()], job.Id()) } return rv } @@ -2185,7 +2091,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { } txn.Commit() - jobDb := jobdb.NewJobDb(tc.SchedulingConfig.PriorityClasses, tc.SchedulingConfig.DefaultPriorityClassName, stringinterner.New(1024)) + jobDb := jobdb.NewJobDb(tc.SchedulingConfig.PriorityClasses, tc.SchedulingConfig.DefaultPriorityClassName, stringinterner.New(1024), testfixtures.TestResourceListFactory) jobDbTxn := jobDb.WriteTxn() var queuedJobs []*jobdb.Job for _, jobs := range jobsByQueue { @@ -2237,8 +2143,6 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { sch := NewPreemptingQueueScheduler( sctx, constraints, - tc.SchedulingConfig.NodeEvictionProbability, - tc.SchedulingConfig.NodeOversubscriptionEvictionProbability, tc.SchedulingConfig.ProtectedFractionOfFairShare, NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, @@ -2255,7 +2159,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { scheduledJobs[jctx.JobId] = true } err = jobDbTxn.BatchDelete( - util.Map( + armadaslices.Map( 
result.ScheduledJobs, func(jctx *schedulercontext.JobSchedulingContext) string { return jctx.JobId @@ -2265,8 +2169,8 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { require.NoError(b, err) jobsByNodeId := make(map[string][]*jobdb.Job) - for _, job := range ScheduledJobsFromSchedulerResult[*jobdb.Job](result) { - nodeId := result.NodeIdByJobId[job.GetId()] + for _, job := range ScheduledJobsFromSchedulerResult(result) { + nodeId := result.NodeIdByJobId[job.Id()] jobsByNodeId[nodeId] = append(jobsByNodeId[nodeId], job) } nodeDb, err = NewNodeDb(tc.SchedulingConfig, stringinterner.New(1024)) @@ -2299,8 +2203,6 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { sch := NewPreemptingQueueScheduler( sctx, constraints, - tc.SchedulingConfig.NodeEvictionProbability, - tc.SchedulingConfig.NodeOversubscriptionEvictionProbability, tc.SchedulingConfig.ProtectedFractionOfFairShare, NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, diff --git a/internal/scheduler/publisher.go b/internal/scheduler/publisher.go index 598a00fc755..caf6716b59b 100644 --- a/internal/scheduler/publisher.go +++ b/internal/scheduler/publisher.go @@ -14,7 +14,6 @@ import ( "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/eventutil" "github.com/armadaproject/armada/internal/common/logging" - "github.com/armadaproject/armada/internal/common/schedulers" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -92,9 +91,6 @@ func (p *PulsarPublisher) PublishMessages(ctx *armadacontext.Context, events []* msgs[i] = &pulsar.ProducerMessage{ Payload: bytes, Key: sequences[i].JobSetName, - Properties: map[string]string{ - schedulers.PropertyName: schedulers.PulsarSchedulerAttribute, - }, } } @@ -154,8 +150,7 @@ func (p *PulsarPublisher) PublishMarkers(ctx *armadacontext.Context, groupId uui } msg := &pulsar.ProducerMessage{ Properties: map[string]string{ - explicitPartitionKey: fmt.Sprintf("%d", i), - schedulers.PropertyName: schedulers.PulsarSchedulerAttribute, + explicitPartitionKey: fmt.Sprintf("%d", i), }, Payload: bytes, } diff --git a/internal/scheduler/queue/queue_cache.go b/internal/scheduler/queue/queue_cache.go new file mode 100644 index 00000000000..e46badc6c11 --- /dev/null +++ b/internal/scheduler/queue/queue_cache.go @@ -0,0 +1,82 @@ +package queue + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/pkg/api" +) + +// QueueCache is an in-memory cache of available queues +type QueueCache interface { + // Get returns all available queues + GetAll(ctx *armadacontext.Context) ([]*api.Queue, error) +} + +// ApiQueueCache is an implementation of QueueCache that fetches queues from the Armada API. 
+// We cache the queues in memory so that we can continue scheduling even if the API is unavailable +type ApiQueueCache struct { + updateFrequency time.Duration + apiClient api.SubmitClient + queues atomic.Pointer[[]*api.Queue] +} + +func NewQueueCache(apiClient api.SubmitClient, updateFrequency time.Duration) *ApiQueueCache { + return &ApiQueueCache{ + updateFrequency: updateFrequency, + apiClient: apiClient, + queues: atomic.Pointer[[]*api.Queue]{}, + } +} + +func (c *ApiQueueCache) Run(ctx *armadacontext.Context) error { + if err := c.fetchQueues(ctx); err != nil { + ctx.Warnf("Error fetching queues: %v", err) + } + ticker := time.NewTicker(c.updateFrequency) + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + if err := c.fetchQueues(ctx); err != nil { + ctx.Warnf("Error fetching queues: %v", err) + } + } + } +} + +func (c *ApiQueueCache) GetAll(_ *armadacontext.Context) ([]*api.Queue, error) { + queues := c.queues.Load() + if queues == nil { + return nil, fmt.Errorf("no queues available") + } + return *queues, nil +} + +func (c *ApiQueueCache) fetchQueues(ctx *armadacontext.Context) error { + start := time.Now() + stream, err := c.apiClient.GetQueues(ctx, &api.StreamingQueueGetRequest{}) + if err != nil { + return err + } + queues := make([]*api.Queue, 0) + for { + msg, err := stream.Recv() + if err != nil { + return err + } + switch msg.GetEvent().(type) { + case *api.StreamingQueueMessage_Queue: + queues = append(queues, msg.GetQueue()) + case *api.StreamingQueueMessage_End: + c.queues.Store(&queues) + ctx.Infof("Refreshed Queues in %s", time.Since(start)) + return nil + default: + return fmt.Errorf("unknown event of type %T", msg.GetEvent()) + } + } +} diff --git a/internal/scheduler/queue/queue_cache_test.go b/internal/scheduler/queue/queue_cache_test.go new file mode 100644 index 00000000000..3d43e491ea3 --- /dev/null +++ b/internal/scheduler/queue/queue_cache_test.go @@ -0,0 +1,92 @@ +package queue + +import ( + "fmt" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" + schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" + "github.com/armadaproject/armada/pkg/api" +) + +func TestFetch(t *testing.T) { + tests := map[string]struct { + queues []*api.Queue + streamError bool + }{ + "No Queues": { + queues: []*api.Queue{}, + }, + "One Queue": { + queues: []*api.Queue{{Name: "testQueue1"}}, + }, + "Two Queues": { + queues: []*api.Queue{ + {Name: "testQueue1"}, + {Name: "testQueue2"}, + }, + }, + "Immediate Steam Error": { + queues: []*api.Queue{}, + streamError: true, + }, + "Steam Error Mid-Stream": { + queues: []*api.Queue{{Name: "testQueue1"}}, + streamError: true, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) + ctrl := gomock.NewController(t) + mockApiClient := schedulermocks.NewMockSubmitClient(ctrl) + mockStream := schedulermocks.NewMockSubmit_GetQueuesClient(ctrl) + for _, queue := range tc.queues { + mockStream. + EXPECT(). + Recv(). + Return( + &api.StreamingQueueMessage{ + Event: &api.StreamingQueueMessage_Queue{Queue: queue}, + }, nil) + } + + if tc.streamError { + mockStream. + EXPECT(). + Recv(). + Return(nil, fmt.Errorf("dummy error")) + } else { + mockStream. + EXPECT(). + Recv(). 
+ Return( + &api.StreamingQueueMessage{ + Event: &api.StreamingQueueMessage_End{}, + }, nil) + } + + mockApiClient.EXPECT().GetQueues(ctx, gomock.Any()).Return(mockStream, nil).Times(1) + + cache := NewQueueCache(mockApiClient, 1*time.Millisecond) + fetchErr := cache.fetchQueues(ctx) + queues, getErr := cache.GetAll(ctx) + + if tc.streamError { + assert.Error(t, fetchErr) + assert.Error(t, getErr) + } else { + assert.NoError(t, fetchErr) + assert.NoError(t, getErr) + assert.Equal(t, tc.queues, queues) + } + + ctrl.Finish() + cancel() + }) + } +} diff --git a/internal/scheduler/queue_scheduler.go b/internal/scheduler/queue_scheduler.go index 85524c02f57..51ccf8ccfbb 100644 --- a/internal/scheduler/queue_scheduler.go +++ b/internal/scheduler/queue_scheduler.go @@ -2,14 +2,14 @@ package scheduler import ( "container/heap" + "fmt" "reflect" - "strconv" - - "github.com/armadaproject/armada/internal/armada/configuration" + "time" "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/armadacontext" + armadamaps "github.com/armadaproject/armada/internal/common/maps" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/fairness" @@ -42,7 +42,7 @@ func NewQueueScheduler( } gangIteratorsByQueue := make(map[string]*QueuedGangIterator) for queue, it := range jobIteratorByQueue { - gangIteratorsByQueue[queue] = NewQueuedGangIterator(sctx, it, constraints.MaxQueueLookback, true) + gangIteratorsByQueue[queue] = NewQueuedGangIterator(sctx, it, constraints.GetMaxQueueLookBack(), true) } candidateGangIterator, err := NewCandidateGangIterator(sctx, sctx.FairnessCostProvider, gangIteratorsByQueue) if err != nil { @@ -61,9 +61,18 @@ func (sch *QueueScheduler) SkipUnsuccessfulSchedulingKeyCheck() { func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResult, error) { var scheduledJobs []*schedulercontext.JobSchedulingContext - var failedJobs []*schedulercontext.JobSchedulingContext + nodeIdByJobId := make(map[string]string) additionalAnnotationsByJobId := make(map[string]map[string]string) + ctx.Info("Looping through candidate gangs...") + + type queueStats struct { + gangCount int + jobCount int + time time.Duration + } + + statsPerQueue := map[string]queueStats{} for { // Peek() returns the next gang to try to schedule. Call Clear() before calling Peek() again. // Calling Clear() after (failing to) schedule ensures we get the next gang in order of smallest fair share. @@ -89,22 +98,21 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul return nil, err default: } + + start := time.Now() if ok, unschedulableReason, err := sch.gangScheduler.Schedule(ctx, gctx); err != nil { return nil, err } else if ok { - numScheduled := gctx.Fit().NumScheduled for _, jctx := range gctx.JobSchedulingContexts { if pctx := jctx.PodSchedulingContext; pctx.IsSuccessful() { scheduledJobs = append(scheduledJobs, jctx) nodeIdByJobId[jctx.JobId] = pctx.NodeId - additionalAnnotationsByJobId[jctx.JobId] = map[string]string{configuration.GangNumJobsScheduledAnnotation: strconv.Itoa(numScheduled)} - } else if jctx.ShouldFail { - failedJobs = append(failedJobs, jctx) } } } else if schedulerconstraints.IsTerminalUnschedulableReason(unschedulableReason) { // If unschedulableReason indicates no more new jobs can be scheduled, // instruct the underlying iterator to only yield evicted jobs from now on. 
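The new `ApiQueueCache` keeps the latest queue list behind an `atomic.Pointer` and refreshes it on a ticker, so `GetAll` stays cheap and keeps serving the last good snapshot if the API becomes unavailable. A hedged usage sketch follows; it assumes an already constructed `api.SubmitClient`, and the import paths are inferred from the file locations in this diff, so treat the wiring as illustrative rather than the scheduler's actual startup code:

```go
package example

import (
	"time"

	"github.com/armadaproject/armada/internal/common/armadacontext"
	"github.com/armadaproject/armada/internal/scheduler/queue"
	"github.com/armadaproject/armada/pkg/api"
)

// startQueueCache wires up the cache and starts the background refresh; the
// returned cache serves the last successfully fetched queue list via GetAll.
func startQueueCache(ctx *armadacontext.Context, client api.SubmitClient) *queue.ApiQueueCache {
	cache := queue.NewQueueCache(client, time.Minute)
	go func() {
		// Run refreshes the cached queues until the context is cancelled.
		if err := cache.Run(ctx); err != nil {
			ctx.Warnf("queue cache stopped: %v", err)
		}
	}()
	return cache
}
```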
+ sch.schedulingContext.TerminationReason = unschedulableReason sch.candidateGangIterator.OnlyYieldEvicted() } else if schedulerconstraints.IsTerminalQueueUnschedulableReason(unschedulableReason) { // If unschedulableReason indicates no more new jobs can be scheduled for this queue, @@ -112,12 +120,25 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul sch.candidateGangIterator.OnlyYieldEvictedForQueue(gctx.Queue) } + duration := time.Now().Sub(start) + stats := statsPerQueue[gctx.Queue] + stats.gangCount++ + stats.jobCount += gctx.Cardinality() + stats.time += duration + statsPerQueue[gctx.Queue] = stats + if duration.Seconds() > 1 { + ctx.Infof("Slow schedule: queue %s, gang cardinality %d, first job id %s, time %fs", gctx.Queue, gctx.Cardinality(), gctx.JobIds()[0], duration.Seconds()) + } + // Clear() to get the next gang in order of smallest fair share. // Calling clear here ensures the gang scheduled in this iteration is accounted for. if err := sch.candidateGangIterator.Clear(); err != nil { return nil, err } } + ctx.Infof("Finished looping through candidate gangs: details %v", armadamaps.MapValues(statsPerQueue, func(s queueStats) string { + return fmt.Sprintf("{gangs=%d, jobs=%d, time=%fs}", s.gangCount, s.jobCount, s.time.Seconds()) + })) if sch.schedulingContext.TerminationReason == "" { sch.schedulingContext.TerminationReason = "no remaining candidate jobs" } @@ -127,7 +148,6 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul return &SchedulerResult{ PreemptedJobs: nil, ScheduledJobs: scheduledJobs, - FailedJobs: failedJobs, NodeIdByJobId: nodeIdByJobId, AdditionalAnnotationsByJobId: additionalAnnotationsByJobId, SchedulingContexts: []*schedulercontext.SchedulingContext{sch.schedulingContext}, diff --git a/internal/scheduler/queue_scheduler_test.go b/internal/scheduler/queue_scheduler_test.go index 472b0b782fe..5f9493e041b 100644 --- a/internal/scheduler/queue_scheduler_test.go +++ b/internal/scheduler/queue_scheduler_test.go @@ -4,9 +4,6 @@ import ( "fmt" "testing" - "github.com/armadaproject/armada/pkg/api" - "github.com/armadaproject/armada/pkg/client/queue" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -15,19 +12,19 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" + armadaconfiguration "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" - "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/fairness" - "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" + "github.com/armadaproject/armada/pkg/api" ) func TestQueueScheduler(t *testing.T) { @@ -39,7 +36,7 @@ func TestQueueScheduler(t *testing.T) { // Set to the 
total resources across all nodes if not provided. TotalResources schedulerobjects.ResourceList // Queues - Queues []queue.Queue + Queues []*api.Queue // Initial resource usage for all queues. InitialAllocatedByQueueAndPriorityClass map[string]schedulerobjects.QuantityByTAndResourceType[string] // Nodes to be considered by the scheduler. @@ -113,7 +110,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 3), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 1), ), - Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, + Queues: []*api.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, ExpectedScheduledIndices: []int{0, 11, 14}, }, "MaximumSchedulingBurst is not exceeded by gangs": { @@ -187,7 +184,7 @@ func TestQueueScheduler(t *testing.T) { }, "per queue, resource class, and pool cpu limit": { SchedulingConfig: testfixtures.TestSchedulingConfig(), - Queues: []queue.Queue{ + Queues: []*api.Queue{ { Name: "A", PriorityFactor: 1.0, @@ -210,7 +207,7 @@ func TestQueueScheduler(t *testing.T) { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 32)), - Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, + Queues: []*api.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, ExpectedScheduledIndices: armadaslices.Concatenate(testfixtures.IntRange(0, 15), testfixtures.IntRange(32, 47)), }, "fairness three queues": { @@ -221,7 +218,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 32), testfixtures.N1Cpu4GiJobs("C", testfixtures.PriorityClass0, 32), ), - Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}, {Name: "C", PriorityFactor: 1.0}}, + Queues: []*api.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}, {Name: "C", PriorityFactor: 1.0}}, ExpectedScheduledIndices: armadaslices.Concatenate( testfixtures.IntRange(0, 10), testfixtures.IntRange(32, 42), @@ -235,7 +232,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 96), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 96), ), - Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 2.0}}, + Queues: []*api.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 2.0}}, ExpectedScheduledIndices: armadaslices.Concatenate( testfixtures.IntRange(0, 63), testfixtures.IntRange(96, 127), @@ -249,7 +246,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 96), testfixtures.N1Cpu4GiJobs("C", testfixtures.PriorityClass0, 96), ), - Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 2.0}, {Name: "C", PriorityFactor: 10.0}}, + Queues: []*api.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 2.0}, {Name: "C", PriorityFactor: 10.0}}, ExpectedScheduledIndices: armadaslices.Concatenate( testfixtures.IntRange(0, 59), testfixtures.IntRange(96, 125), @@ -263,7 +260,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 32), ), - Queues: 
[]queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, + Queues: []*api.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, InitialAllocatedByQueueAndPriorityClass: map[string]schedulerobjects.QuantityByTAndResourceType[string]{ "A": { testfixtures.PriorityClass0: schedulerobjects.ResourceList{ @@ -347,7 +344,7 @@ func TestQueueScheduler(t *testing.T) { ), Queues: testfixtures.SingleQueuePriorityOne("A"), MinimumJobSize: map[string]resource.Quantity{ - "gpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("1"), }, ExpectedScheduledIndices: []int{2}, }, @@ -361,7 +358,7 @@ func TestQueueScheduler(t *testing.T) { ), Queues: testfixtures.SingleQueuePriorityOne("A"), MinimumJobSize: map[string]resource.Quantity{ - "gpu": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, ExpectedScheduledIndices: nil, }, @@ -423,16 +420,14 @@ func TestQueueScheduler(t *testing.T) { Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( testfixtures.WithAnnotationsJobs(map[string]string{ - configuration.GangIdAnnotation: "my-gang", - configuration.GangCardinalityAnnotation: "2", - configuration.GangMinimumCardinalityAnnotation: "1", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.WithAnnotationsJobs(map[string]string{ - configuration.GangIdAnnotation: "my-gang", - configuration.GangCardinalityAnnotation: "2", - configuration.GangMinimumCardinalityAnnotation: "1", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), @@ -451,16 +446,14 @@ func TestQueueScheduler(t *testing.T) { Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( testfixtures.WithAnnotationsJobs(map[string]string{ - configuration.GangIdAnnotation: "my-gang", - configuration.GangCardinalityAnnotation: "2", - configuration.GangMinimumCardinalityAnnotation: "2", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.WithAnnotationsJobs(map[string]string{ - configuration.GangIdAnnotation: "my-gang", - configuration.GangCardinalityAnnotation: "2", - configuration.GangMinimumCardinalityAnnotation: "2", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), @@ -530,19 +523,19 @@ func TestQueueScheduler(t *testing.T) { tc.TotalResources = nodeDb.TotalResources() } - queueNameToQueue := map[string]*queue.Queue{} + queueNameToQueue := map[string]*api.Queue{} for _, q := range tc.Queues { - queueNameToQueue[q.Name] = &q + queueNameToQueue[q.Name] = q } indexByJobId := make(map[string]int) for i, job := range tc.Jobs { - if _, ok := queueNameToQueue[job.GetQueue()]; !ok { + if _, ok := queueNameToQueue[job.Queue()]; !ok { panic(fmt.Sprintf("queue %s does not exist", job.Queue())) } - indexByJobId[job.GetId()] = i + indexByJobId[job.Id()] = i } - legacySchedulerJobs := make([]interfaces.LegacySchedulerJob, len(tc.Jobs)) + legacySchedulerJobs := 
make([]*jobdb.Job, len(tc.Jobs)) for i, job := range tc.Jobs { legacySchedulerJobs[i] = job } @@ -616,7 +609,7 @@ func TestQueueScheduler(t *testing.T) { expectedScheduledIndicesByQueue := armadaslices.GroupByFunc( tc.ExpectedScheduledIndices, func(i int) string { - return tc.Jobs[i].GetQueue() + return tc.Jobs[i].Queue() }, ) expectedSuccessfulOrNotAttemptedIndices := armadaslices.MapAndGroupByFuncs( @@ -633,7 +626,7 @@ func TestQueueScheduler(t *testing.T) { expectedUnsuccessfulIndicesByQueue := armadaslices.GroupByFunc( expectedUnsuccessfulIndices, func(i int) string { - return tc.Jobs[i].GetQueue() + return tc.Jobs[i].Queue() }, ) actualSuccessfulIndicesByQueue := make(map[string][]int) @@ -647,7 +640,7 @@ func TestQueueScheduler(t *testing.T) { qctx := sctx.QueueSchedulingContexts[queue] require.NotNil(t, queue) - is := util.Map( + is := armadaslices.Map( maps.Keys(qctx.SuccessfulJobSchedulingContexts), func(jobId string) int { return indexByJobId[jobId] @@ -658,7 +651,7 @@ func TestQueueScheduler(t *testing.T) { actualSuccessfulIndicesByQueue[queue] = is } - is = util.Map( + is = armadaslices.Map( maps.Keys(qctx.UnsuccessfulJobSchedulingContexts), func(jobId string) int { return indexByJobId[jobId] @@ -732,12 +725,12 @@ func TestQueueScheduler(t *testing.T) { func NewNodeDb(config configuration.SchedulingConfig, stringInterner *stringinterner.StringInterner) (*nodedb.NodeDb, error) { nodeDb, err := nodedb.NewNodeDb( config.PriorityClasses, - config.MaxExtraNodesToConsider, config.IndexedResources, config.IndexedTaints, config.IndexedNodeLabels, config.WellKnownNodeTypes, stringInterner, + testfixtures.TestResourceListFactory, ) if err != nil { return nil, err diff --git a/internal/scheduler/result.go b/internal/scheduler/result.go index 426d7790a27..9c42e107810 100644 --- a/internal/scheduler/result.go +++ b/internal/scheduler/result.go @@ -2,7 +2,7 @@ package scheduler import ( schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/jobdb" ) // SchedulerResult is returned by Rescheduler.Schedule(). @@ -11,9 +11,6 @@ type SchedulerResult struct { PreemptedJobs []*schedulercontext.JobSchedulingContext // Queued jobs that should be scheduled. ScheduledJobs []*schedulercontext.JobSchedulingContext - // Queued jobs that could not be scheduled. - // This is used to fail jobs that could not schedule above `minimumGangCardinality`. - FailedJobs []*schedulercontext.JobSchedulingContext // For each preempted job, maps the job id to the id of the node on which the job was running. // For each scheduled job, maps the job id to the id of the node on which the job should be scheduled. NodeIdByJobId map[string]string @@ -26,29 +23,20 @@ type SchedulerResult struct { AdditionalAnnotationsByJobId map[string]map[string]string } -// PreemptedJobsFromSchedulerResult returns the slice of preempted jobs in the result cast to type T. -func PreemptedJobsFromSchedulerResult[T interfaces.LegacySchedulerJob](sr *SchedulerResult) []T { - rv := make([]T, len(sr.PreemptedJobs)) +// PreemptedJobsFromSchedulerResult returns the slice of preempted jobs in the result. 
+func PreemptedJobsFromSchedulerResult(sr *SchedulerResult) []*jobdb.Job { + rv := make([]*jobdb.Job, len(sr.PreemptedJobs)) for i, jctx := range sr.PreemptedJobs { - rv[i] = jctx.Job.(T) + rv[i] = jctx.Job } return rv } -// ScheduledJobsFromSchedulerResult returns the slice of scheduled jobs in the result cast to type T. -func ScheduledJobsFromSchedulerResult[T interfaces.LegacySchedulerJob](sr *SchedulerResult) []T { - rv := make([]T, len(sr.ScheduledJobs)) +// ScheduledJobsFromSchedulerResult returns the slice of scheduled jobs in the result. +func ScheduledJobsFromSchedulerResult(sr *SchedulerResult) []*jobdb.Job { + rv := make([]*jobdb.Job, len(sr.ScheduledJobs)) for i, jctx := range sr.ScheduledJobs { - rv[i] = jctx.Job.(T) - } - return rv -} - -// FailedJobsFromSchedulerResult returns the slice of scheduled jobs in the result cast to type T. -func FailedJobsFromSchedulerResult[T interfaces.LegacySchedulerJob](sr *SchedulerResult) []T { - rv := make([]T, len(sr.FailedJobs)) - for i, jctx := range sr.FailedJobs { - rv[i] = jctx.Job.(T) + rv[i] = jctx.Job } return rv } diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index 447940aa597..a0fb86bd7ae 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -9,7 +9,7 @@ import ( "github.com/pkg/errors" "github.com/renstrom/shortuuid" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -57,13 +57,13 @@ type Scheduler struct { // The label used when setting node anti affinities. nodeIdLabel string // If an executor fails to report in for this amount of time, - // all jobs assigne to that executor are cancelled. + // all jobs assigned to that executor are cancelled. executorTimeout time.Duration // The time the previous scheduling round ended previousSchedulingRoundEnd time.Time // Used for timing decisions (e.g., sleep). // Injected here so that we can mock it out for testing. - clock clock.Clock + clock clock.WithTicker // Stores active jobs (i.e. queued or running). jobDb *jobdb.JobDb // Highest offset we've read from Postgres on the jobs table. @@ -175,6 +175,9 @@ func (s *Scheduler) Run(ctx *armadacontext.Context) error { // // TODO: Once the Pulsar client supports transactions, we can guarantee consistency even in case of errors. shouldSchedule := s.clock.Now().Sub(s.previousSchedulingRoundEnd) > s.schedulePeriod + if !shouldSchedule { + ctx.Info("Won't schedule this cycle as still within schedulePeriod") + } result, err := s.cycle(ctx, fullUpdate, leaderToken, shouldSchedule) if err != nil { @@ -232,14 +235,17 @@ func (s *Scheduler) cycle(ctx *armadacontext.Context, updateAll bool, leaderToke overallSchedulerResult := SchedulerResult{} // Update job state. + ctx.Info("Syncing internal state with database") updatedJobs, jsts, err := s.syncState(ctx) if err != nil { return overallSchedulerResult, err } + ctx.Info("Finished syncing state") // Only the leader may make decisions; exit if not leader. // Only export metrics if leader. 
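The cycle above only schedules when the time since `previousSchedulingRoundEnd` exceeds `schedulePeriod`, and the clock is injected (now as `clock.WithTicker` from `k8s.io/utils/clock`) so tests can drive it without sleeping. A small sketch of that gate using the same clock packages; the function and values are illustrative, not the Scheduler's actual fields:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
	testclock "k8s.io/utils/clock/testing"
)

// shouldSchedule reports whether the previous scheduling round ended long
// enough ago. Taking the clock as an interface lets tests advance time.
func shouldSchedule(c clock.PassiveClock, previousRoundEnd time.Time, schedulePeriod time.Duration) bool {
	return c.Now().Sub(previousRoundEnd) > schedulePeriod
}

func main() {
	start := time.Now()
	fake := testclock.NewFakeClock(start)

	fmt.Println(shouldSchedule(fake, start, 10*time.Second)) // false: still within the period
	fake.Step(11 * time.Second)
	fmt.Println(shouldSchedule(fake, start, 10*time.Second)) // true
}
```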
if !s.leaderController.ValidateToken(leaderToken) { + ctx.Info("Not the leader so will not attempt to schedule") s.schedulerMetrics.Disable() return overallSchedulerResult, err } else { @@ -267,10 +273,12 @@ func (s *Scheduler) cycle(ctx *armadacontext.Context, updateAll bool, leaderToke failedRunIds = append(failedRunIds, run.Id()) } } + ctx.Info("Fetching job run errors") jobRepoRunErrorsByRunId, err := s.jobRepository.FetchJobRunErrors(ctx, failedRunIds) if err != nil { return overallSchedulerResult, err } + ctx.Infof("Fetched %d job run errors", len(jobRepoRunErrorsByRunId)) // Update metrics. if !s.schedulerMetrics.IsDisabled() { @@ -296,34 +304,38 @@ func (s *Scheduler) cycle(ctx *armadacontext.Context, updateAll bool, leaderToke t = time.Now() } if jst.Failed { - s.failureEstimator.Push(run.NodeName(), jst.Job.GetQueue(), run.Executor(), false, t) + s.failureEstimator.Push(run.NodeName(), jst.Job.Queue(), run.Executor(), false, t) } if jst.Succeeded { - s.failureEstimator.Push(run.NodeName(), jst.Job.GetQueue(), run.Executor(), true, t) + s.failureEstimator.Push(run.NodeName(), jst.Job.Queue(), run.Executor(), true, t) } } s.failureEstimator.Update() } // Generate any eventSequences that came out of synchronising the db state. + ctx.Info("Generating update messages based on reconciliation changes") events, err := s.generateUpdateMessages(ctx, txn, updatedJobs, jobRepoRunErrorsByRunId) if err != nil { return overallSchedulerResult, err } + ctx.Infof("Finished generating updates messages, generating %d events", len(events)) - // Expire any jobs running on clusters that haven't heartbeated within the configured deadline. - expirationEvents, err := s.expireJobsIfNecessary(ctx, txn) + // Validate that any new jobs can be scheduled + validationEvents, err := s.submitCheck(ctx, txn) if err != nil { return overallSchedulerResult, err } - events = append(events, expirationEvents...) + events = append(events, validationEvents...) - // Request cancel for any jobs that exceed queueTtl - queueTtlCancelEvents, err := s.cancelQueuedJobsIfExpired(txn) + // Expire any jobs running on clusters that haven't heartbeated within the configured deadline. + ctx.Info("Looking for jobs to expire") + expirationEvents, err := s.expireJobsIfNecessary(ctx, txn) if err != nil { return overallSchedulerResult, err } - events = append(events, queueTtlCancelEvents...) + ctx.Infof("Finished looking for jobs to expire, generating %d events", len(expirationEvents)) + events = append(events, expirationEvents...) // Schedule jobs. if shouldSchedule { @@ -349,18 +361,22 @@ func (s *Scheduler) cycle(ctx *armadacontext.Context, updateAll bool, leaderToke return s.leaderController.ValidateToken(leaderToken) } start := s.clock.Now() + ctx.Infof("Starting to publish %d eventSequences to pulsar", len(events)) if err = s.publisher.PublishMessages(ctx, events, isLeader); err != nil { return overallSchedulerResult, err } - ctx.Infof("published %d eventSequences to pulsar in %s", len(events), s.clock.Since(start)) + ctx.Infof("Published %d eventSequences to pulsar in %s", len(events), s.clock.Since(start)) // Optionally assert that the jobDb is in a valid state and then commit. if s.enableAssertions { + ctx.Infof("Performing assertions on current state") if err := txn.Assert(false); err != nil { return overallSchedulerResult, err } } + ctx.Info("Committing cycle transaction") txn.Commit() + ctx.Info("Completed committing cycle transaction") // Update metrics based on overallSchedulerResult. 
if err := s.updateMetricsFromSchedulerResult(ctx, overallSchedulerResult); err != nil { @@ -379,13 +395,8 @@ func (s *Scheduler) updateMetricsFromSchedulerResult(ctx *armadacontext.Context, return err } } - for _, jctx := range overallSchedulerResult.FailedJobs { - if err := s.schedulerMetrics.UpdateFailed(ctx, jctx.Job.(*jobdb.Job), nil); err != nil { - return err - } - } // UpdatePreempted is called from within UpdateFailed if the job has a JobRunPreemptedError. - // This is to make sure that preempttion is counted only when the job is actually preempted, not when the scheduler decides to preempt it. + // This is to make sure that preemption is counted only when the job is actually preempted, not when the scheduler decides to preempt it. return nil } @@ -448,7 +459,7 @@ func (s *Scheduler) createSchedulingInfoWithNodeAntiAffinityForAttemptedRuns(job newSchedulingInfo.Version = job.JobSchedulingInfo().Version + 1 podRequirements := newSchedulingInfo.GetPodRequirements() if podRequirements == nil { - return nil, errors.Errorf("no pod scheduling requirement found for job %s", job.GetId()) + return nil, errors.Errorf("no pod scheduling requirement found for job %s", job.Id()) } newAffinity := podRequirements.Affinity if newAffinity == nil { @@ -467,13 +478,26 @@ func (s *Scheduler) createSchedulingInfoWithNodeAntiAffinityForAttemptedRuns(job return newSchedulingInfo, nil } -func (s *Scheduler) addNodeAntiAffinitiesForAttemptedRunsIfSchedulable(job *jobdb.Job) (*jobdb.Job, bool, error) { +func (s *Scheduler) addNodeAntiAffinitiesForAttemptedRunsIfSchedulable(ctx *armadacontext.Context, job *jobdb.Job) (*jobdb.Job, bool, error) { schedulingInfoWithNodeAntiAffinity, err := s.createSchedulingInfoWithNodeAntiAffinityForAttemptedRuns(job) if err != nil { return nil, false, err } - job = job.WithJobSchedulingInfo(schedulingInfoWithNodeAntiAffinity) - isSchedulable, _ := s.submitChecker.CheckJobDbJobs([]*jobdb.Job{job}) + + job, err = job.WithJobSchedulingInfo(schedulingInfoWithNodeAntiAffinity) + if err != nil { + // should never happen - requirements haven't changed + panic(err) + } + results, err := s.submitChecker.Check(ctx, []*jobdb.Job{job}) + if err != nil { + return nil, false, err + } + result, ok := results[job.Id()] + isSchedulable := false + if ok { + isSchedulable = result.isSchedulable + } return job, isSchedulable, nil } @@ -484,8 +508,8 @@ func (s *Scheduler) eventsFromSchedulerResult(result *SchedulerResult) ([]*armad // EventsFromSchedulerResult generates necessary EventSequences from the provided SchedulerResult. 
func EventsFromSchedulerResult(result *SchedulerResult, time time.Time) ([]*armadaevents.EventSequence, error) { - eventSequences := make([]*armadaevents.EventSequence, 0, len(result.PreemptedJobs)+len(result.ScheduledJobs)+len(result.FailedJobs)) - eventSequences, err := AppendEventSequencesFromPreemptedJobs(eventSequences, PreemptedJobsFromSchedulerResult[*jobdb.Job](result), time) + eventSequences := make([]*armadaevents.EventSequence, 0, len(result.PreemptedJobs)+len(result.ScheduledJobs)) + eventSequences, err := AppendEventSequencesFromPreemptedJobs(eventSequences, PreemptedJobsFromSchedulerResult(result), time) if err != nil { return nil, err } @@ -493,10 +517,6 @@ func EventsFromSchedulerResult(result *SchedulerResult, time time.Time) ([]*arma if err != nil { return nil, err } - eventSequences, err = AppendEventSequencesFromUnschedulableJobs(eventSequences, FailedJobsFromSchedulerResult[*jobdb.Job](result), time) - if err != nil { - return nil, err - } return eventSequences, nil } @@ -525,8 +545,10 @@ func createEventsForPreemptedJob(jobId *armadaevents.Uuid, runId *armadaevents.U Created: &time, Event: &armadaevents.EventSequence_Event_JobRunPreempted{ JobRunPreempted: &armadaevents.JobRunPreempted{ - PreemptedRunId: runId, - PreemptedJobId: jobId, + PreemptedRunId: runId, + PreemptedRunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + PreemptedJobId: jobId, + PreemptedJobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), }, }, }, @@ -534,8 +556,10 @@ func createEventsForPreemptedJob(jobId *armadaevents.Uuid, runId *armadaevents.U Created: &time, Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - RunId: runId, - JobId: jobId, + RunId: runId, + RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId), + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), Errors: []*armadaevents.Error{ { Terminal: true, @@ -551,7 +575,8 @@ func createEventsForPreemptedJob(jobId *armadaevents.Uuid, runId *armadaevents.U Created: &time, Event: &armadaevents.EventSequence_Event_JobErrors{ JobErrors: &armadaevents.JobErrors{ - JobId: jobId, + JobId: jobId, + JobIdStr: armadaevents.MustUlidStringFromProtoUuid(jobId), Errors: []*armadaevents.Error{ { Terminal: true, @@ -568,7 +593,7 @@ func createEventsForPreemptedJob(jobId *armadaevents.Uuid, runId *armadaevents.U func AppendEventSequencesFromScheduledJobs(eventSequences []*armadaevents.EventSequence, jctxs []*schedulercontext.JobSchedulingContext, additionalAnnotationsByJobId map[string]map[string]string) ([]*armadaevents.EventSequence, error) { for _, jctx := range jctxs { - job := jctx.Job.(*jobdb.Job) + job := jctx.Job jobId, err := armadaevents.ProtoUuidFromUlidString(job.Id()) if err != nil { return nil, err @@ -578,7 +603,7 @@ func AppendEventSequencesFromScheduledJobs(eventSequences []*armadaevents.EventS return nil, errors.Errorf("attempting to generate lease eventSequences for job %s with no associated runs", job.Id()) } runCreationTime := time.Unix(0, job.ActiveRunTimestamp()) - scheduledAtPriority, hasScheduledAtPriority := job.GetScheduledAtPriority() + scheduledAtPriority, hasScheduledAtPriority := job.ScheduledAtPriority() eventSequences = append(eventSequences, &armadaevents.EventSequence{ Queue: job.Queue(), JobSetName: job.Jobset(), // TODO: Rename to JobSet. 
@@ -588,7 +613,9 @@ func AppendEventSequencesFromScheduledJobs(eventSequences []*armadaevents.EventS Event: &armadaevents.EventSequence_Event_JobRunLeased{ JobRunLeased: &armadaevents.JobRunLeased{ RunId: armadaevents.ProtoUuidFromUuid(run.Id()), + RunIdStr: run.Id().String(), JobId: jobId, + JobIdStr: job.Id(), ExecutorId: run.Executor(), // NodeId here refers to the unique identifier of the node in an executor cluster, // which is referred to as the NodeName within the scheduler. @@ -610,39 +637,13 @@ func AppendEventSequencesFromScheduledJobs(eventSequences []*armadaevents.EventS return eventSequences, nil } -func AppendEventSequencesFromUnschedulableJobs(eventSequences []*armadaevents.EventSequence, jobs []*jobdb.Job, time time.Time) ([]*armadaevents.EventSequence, error) { - for _, job := range jobs { - jobId, err := armadaevents.ProtoUuidFromUlidString(job.GetId()) - if err != nil { - return nil, err - } - gangJobUnschedulableError := &armadaevents.Error{ - Terminal: true, - Reason: &armadaevents.Error_GangJobUnschedulable{GangJobUnschedulable: &armadaevents.GangJobUnschedulable{Message: "Job did not meet the minimum gang cardinality"}}, - } - eventSequences = append(eventSequences, &armadaevents.EventSequence{ - Queue: job.GetQueue(), - JobSetName: job.GetJobSet(), - Events: []*armadaevents.EventSequence_Event{ - { - Created: &time, - Event: &armadaevents.EventSequence_Event_JobErrors{ - JobErrors: &armadaevents.JobErrors{JobId: jobId, Errors: []*armadaevents.Error{gangJobUnschedulableError}}, - }, - }, - }, - }) - } - return eventSequences, nil -} - // generateUpdateMessages generates EventSequences representing the state changes on updated jobs. // If there are no state changes then an empty slice will be returned. -func (s *Scheduler) generateUpdateMessages(_ *armadacontext.Context, txn *jobdb.Txn, updatedJobs []*jobdb.Job, jobRunErrors map[uuid.UUID]*armadaevents.Error) ([]*armadaevents.EventSequence, error) { +func (s *Scheduler) generateUpdateMessages(ctx *armadacontext.Context, txn *jobdb.Txn, updatedJobs []*jobdb.Job, jobRunErrors map[uuid.UUID]*armadaevents.Error) ([]*armadaevents.EventSequence, error) { // Generate any eventSequences that came out of synchronising the db state. var events []*armadaevents.EventSequence for _, job := range updatedJobs { - jobEvents, err := s.generateUpdateMessagesFromJob(job, jobRunErrors, txn) + jobEvents, err := s.generateUpdateMessagesFromJob(ctx, job, jobRunErrors, txn) if err != nil { return nil, err } @@ -655,7 +656,7 @@ func (s *Scheduler) generateUpdateMessages(_ *armadacontext.Context, txn *jobdb. // generateUpdateMessages generates an EventSequence representing the state changes for a single job. // If there are no state changes it returns nil. -func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors map[uuid.UUID]*armadaevents.Error, txn *jobdb.Txn) (*armadaevents.EventSequence, error) { +func (s *Scheduler) generateUpdateMessagesFromJob(ctx *armadacontext.Context, job *jobdb.Job, jobRunErrors map[uuid.UUID]*armadaevents.Error, txn *jobdb.Txn) (*armadaevents.EventSequence, error) { var events []*armadaevents.EventSequence_Event // Is the job already in a terminal state? 
If so then don't send any more messages @@ -676,6 +677,7 @@ func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors m Event: &armadaevents.EventSequence_Event_ReprioritisedJob{ ReprioritisedJob: &armadaevents.ReprioritisedJob{ JobId: jobId, + JobIdStr: job.Id(), Priority: job.Priority(), }, }, @@ -685,35 +687,64 @@ func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors m // Has the job been requested cancelled. If so, cancel the job if job.CancelRequested() { - for _, run := range job.AllRuns() { - job = job.WithUpdatedRun(run.WithRunning(false).WithoutTerminal().WithCancelled(true)) + if job.HasRuns() { + lastRun := job.LatestRun() + job = job.WithUpdatedRun(lastRun.WithoutTerminal().WithCancelled(true)) + + events = append(events, &armadaevents.EventSequence_Event{ + Created: s.now(), + Event: &armadaevents.EventSequence_Event_JobRunCancelled{ + JobRunCancelled: &armadaevents.JobRunCancelled{ + RunId: armadaevents.ProtoUuidFromUuid(lastRun.Id()), + RunIdStr: lastRun.Id().String(), + JobId: jobId, + JobIdStr: job.Id(), + }, + }, + }) } job = job.WithQueued(false).WithoutTerminal().WithCancelled(true) cancel := &armadaevents.EventSequence_Event{ Created: s.now(), Event: &armadaevents.EventSequence_Event_CancelledJob{ - CancelledJob: &armadaevents.CancelledJob{JobId: jobId}, + CancelledJob: &armadaevents.CancelledJob{JobId: jobId, JobIdStr: job.Id()}, }, } events = append(events, cancel) } else if job.CancelByJobsetRequested() { - for _, run := range job.AllRuns() { - job = job.WithUpdatedRun(run.WithRunning(false).WithoutTerminal().WithCancelled(true)) - } job = job.WithQueued(false).WithoutTerminal().WithCancelled(true) cancelRequest := &armadaevents.EventSequence_Event{ Created: s.now(), Event: &armadaevents.EventSequence_Event_CancelJob{ - CancelJob: &armadaevents.CancelJob{JobId: jobId}, + CancelJob: &armadaevents.CancelJob{JobId: jobId, JobIdStr: job.Id()}, }, } + events = append(events, cancelRequest) + + if job.HasRuns() { + lastRun := job.LatestRun() + job = job.WithUpdatedRun(lastRun.WithoutTerminal().WithCancelled(true)) + + events = append(events, &armadaevents.EventSequence_Event{ + Created: s.now(), + Event: &armadaevents.EventSequence_Event_JobRunCancelled{ + JobRunCancelled: &armadaevents.JobRunCancelled{ + RunId: armadaevents.ProtoUuidFromUuid(lastRun.Id()), + RunIdStr: lastRun.Id().String(), + JobId: jobId, + JobIdStr: job.Id(), + }, + }, + }) + } + cancel := &armadaevents.EventSequence_Event{ Created: s.now(), Event: &armadaevents.EventSequence_Event_CancelledJob{ - CancelledJob: &armadaevents.CancelledJob{JobId: jobId}, + CancelledJob: &armadaevents.CancelledJob{JobId: jobId, JobIdStr: job.Id()}, }, } - events = append(events, cancelRequest, cancel) + events = append(events, cancel) } else if job.HasRuns() { lastRun := job.LatestRun() // InTerminalState states. 
Can only have one of these @@ -723,19 +754,20 @@ func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors m Created: s.now(), Event: &armadaevents.EventSequence_Event_JobSucceeded{ JobSucceeded: &armadaevents.JobSucceeded{ - JobId: jobId, + JobId: jobId, + JobIdStr: job.Id(), }, }, } events = append(events, jobSucceeded) } else if lastRun.Failed() && !job.Queued() { - failFast := job.GetAnnotations()[configuration.FailFastAnnotation] == "true" + failFast := job.Annotations()[configuration.FailFastAnnotation] == "true" requeueJob := !failFast && lastRun.Returned() && job.NumAttempts() < s.maxAttemptedRuns if requeueJob && lastRun.RunAttempted() { - jobWithAntiAffinity, schedulable, err := s.addNodeAntiAffinitiesForAttemptedRunsIfSchedulable(job) + jobWithAntiAffinity, schedulable, err := s.addNodeAntiAffinitiesForAttemptedRunsIfSchedulable(ctx, job) if err != nil { - return nil, errors.Errorf("unable to set node anti-affinity for job %s because %s", job.GetId(), err) + return nil, errors.Errorf("unable to set node anti-affinity for job %s because %s", job.Id(), err) } else { if schedulable { job = jobWithAntiAffinity @@ -755,6 +787,7 @@ func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors m Event: &armadaevents.EventSequence_Event_JobRequeued{ JobRequeued: &armadaevents.JobRequeued{ JobId: jobId, + JobIdStr: job.Id(), SchedulingInfo: job.JobSchedulingInfo(), UpdateSequenceNumber: job.QueuedVersion(), }, @@ -799,15 +832,16 @@ func (s *Scheduler) generateUpdateMessagesFromJob(job *jobdb.Job, jobRunErrors m Created: s.now(), Event: &armadaevents.EventSequence_Event_JobErrors{ JobErrors: &armadaevents.JobErrors{ - JobId: jobId, - Errors: []*armadaevents.Error{runError}, + JobId: jobId, + JobIdStr: job.Id(), + Errors: []*armadaevents.Error{runError}, }, }, } events = append(events, jobErrors) } - } else if lastRun.PreemptRequested() && job.GetPriorityClass().Preemptible { + } else if lastRun.PreemptRequested() && job.PriorityClass().Preemptible { job = job.WithQueued(false).WithFailed(true).WithUpdatedRun(lastRun.WithoutTerminal().WithFailed(true)) events = append(events, createEventsForPreemptedJob(jobId, armadaevents.ProtoUuidFromUuid(lastRun.Id()), s.clock.Now())...) 
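The failure handling above requeues a failed job only when fail-fast is not set, the run was returned, and the attempt budget is not exhausted; an attempted run additionally gets node anti-affinity for the nodes it already tried and a schedulability re-check via the submit checker. A simplified sketch of the requeue decision itself; the types are local stand-ins for illustration only:

```go
package main

import "fmt"

// Illustrative stand-ins for the relevant pieces of a failed job and its run.
type failedRun struct {
	returned     bool // the lease was returned to the scheduler
	runAttempted bool // the run actually started on a node
}

type failedJob struct {
	failFast    bool // from the fail-fast annotation
	numAttempts int
	run         failedRun
}

// shouldRequeue mirrors the basic decision: requeue only if fail-fast is off,
// the run was returned, and the retry budget is not spent. In the real code an
// attempted run also gets node anti-affinity and must remain schedulable.
func shouldRequeue(j failedJob, maxAttemptedRuns int) bool {
	return !j.failFast && j.run.returned && j.numAttempts < maxAttemptedRuns
}

func main() {
	fmt.Println(shouldRequeue(failedJob{numAttempts: 1, run: failedRun{returned: true, runAttempted: true}}, 5)) // true
	fmt.Println(shouldRequeue(failedJob{failFast: true, numAttempts: 1, run: failedRun{returned: true}}, 5))     // false
	fmt.Println(shouldRequeue(failedJob{numAttempts: 7, run: failedRun{returned: true}}, 5))                     // false
}
```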
} @@ -838,7 +872,7 @@ func (s *Scheduler) expireJobsIfNecessary(ctx *armadacontext.Context, txn *jobdb if err != nil { return nil, err } - staleExecutors := make(map[string]bool, 0) + staleExecutors := make(map[string]bool) cutOff := s.clock.Now().Add(-s.executorTimeout) jobsToUpdate := make([]*jobdb.Job, 0) @@ -894,9 +928,11 @@ func (s *Scheduler) expireJobsIfNecessary(ctx *armadacontext.Context, txn *jobdb Created: s.now(), Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - RunId: armadaevents.ProtoUuidFromUuid(run.Id()), - JobId: jobId, - Errors: []*armadaevents.Error{leaseExpiredError}, + RunId: armadaevents.ProtoUuidFromUuid(run.Id()), + RunIdStr: run.Id().String(), + JobId: jobId, + JobIdStr: job.Id(), + Errors: []*armadaevents.Error{leaseExpiredError}, }, }, }, @@ -904,8 +940,9 @@ func (s *Scheduler) expireJobsIfNecessary(ctx *armadacontext.Context, txn *jobdb Created: s.now(), Event: &armadaevents.EventSequence_Event_JobErrors{ JobErrors: &armadaevents.JobErrors{ - JobId: jobId, - Errors: []*armadaevents.Error{leaseExpiredError}, + JobId: jobId, + JobIdStr: job.Id(), + Errors: []*armadaevents.Error{leaseExpiredError}, }, }, }, @@ -920,48 +957,89 @@ func (s *Scheduler) expireJobsIfNecessary(ctx *armadacontext.Context, txn *jobdb return events, nil } -// cancelQueuedJobsIfExpired generates cancel request messages for any queued jobs that exceed their queueTtl. -func (s *Scheduler) cancelQueuedJobsIfExpired(txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { - jobsToCancel := make([]*jobdb.Job, 0) - events := make([]*armadaevents.EventSequence, 0) - it := txn.QueuedJobsByTtl() +func (s *Scheduler) submitCheck(ctx *armadacontext.Context, txn *jobdb.Txn) ([]*armadaevents.EventSequence, error) { + jobsToCheck := make([]*jobdb.Job, 0) - // `it` is ordered such that the jobs with the least ttl remaining come first, hence we exit early if we find a job that is not expired. 
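`expireJobsIfNecessary` derives a cut-off as now minus `executorTimeout` and treats executors whose last report is older than that as stale, expiring the leases of jobs assigned to them. A self-contained sketch of the cut-off logic, with a plain map standing in for the executor repository:

```go
package main

import (
	"fmt"
	"time"
)

// staleExecutors returns executors whose last report is older than the
// timeout; the scheduler expires the leases of jobs assigned to them.
func staleExecutors(lastReported map[string]time.Time, now time.Time, timeout time.Duration) map[string]bool {
	cutOff := now.Add(-timeout)
	stale := map[string]bool{}
	for executor, t := range lastReported {
		if t.Before(cutOff) {
			stale[executor] = true
		}
	}
	return stale
}

func main() {
	now := time.Now()
	lastReported := map[string]time.Time{
		"executor-a": now.Add(-30 * time.Second),
		"executor-b": now.Add(-15 * time.Minute),
	}
	fmt.Println(staleExecutors(lastReported, now, 10*time.Minute)) // only executor-b is stale
}
```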
- for job, _ := it.Next(); job != nil && job.HasQueueTtlExpired(); job, _ = it.Next() { + it := txn.UnvalidatedJobs() + + for job, _ := it.Next(); job != nil; job, _ = it.Next() { + // Don't check jobs that are terminal if job.InTerminalState() { continue } + jobsToCheck = append(jobsToCheck, job) + } + + results, err := s.submitChecker.Check(ctx, jobsToCheck) + if err != nil { + return nil, err + } + + for _, job := range jobsToCheck { + err := job.ValidateResourceRequests() + if err != nil { + results[job.Id()] = schedulingResult{isSchedulable: false, reason: "invalid resource request: " + err.Error()} + } + } + + events := make([]*armadaevents.EventSequence, 0) + jobsToUpdate := make([]*jobdb.Job, 0) + for _, job := range jobsToCheck { + result := results[job.Id()] - job = job.WithCancelRequested(true).WithQueued(false).WithCancelled(true) jobId, err := armadaevents.ProtoUuidFromUlidString(job.Id()) if err != nil { return nil, err } - reason := "Expired queue ttl" - cancel := &armadaevents.EventSequence{ + es := &armadaevents.EventSequence{ Queue: job.Queue(), JobSetName: job.Jobset(), Events: []*armadaevents.EventSequence_Event{ { Created: s.now(), - Event: &armadaevents.EventSequence_Event_CancelJob{CancelJob: &armadaevents.CancelJob{JobId: jobId, Reason: reason}}, - }, - { - Created: s.now(), - Event: &armadaevents.EventSequence_Event_CancelledJob{CancelledJob: &armadaevents.CancelledJob{JobId: jobId, Reason: reason}}, }, }, } - jobsToCancel = append(jobsToCancel, job) - events = append(events, cancel) + if result.isSchedulable { + job = job.WithValidated(true).WithPools(result.pools) + jobsToUpdate = append(jobsToUpdate, job) + + es.Events[0].Event = &armadaevents.EventSequence_Event_JobValidated{ + JobValidated: &armadaevents.JobValidated{ + JobId: jobId, + JobIdStr: job.Id(), + Pools: result.pools, + }, + } + } else { + job = job.WithFailed(true).WithQueued(false) + jobsToUpdate = append(jobsToUpdate, job) + + es.Events[0].Event = &armadaevents.EventSequence_Event_JobErrors{ + JobErrors: &armadaevents.JobErrors{ + JobId: jobId, + JobIdStr: job.Id(), + Errors: []*armadaevents.Error{ + { + Terminal: true, + Reason: &armadaevents.Error_JobRejected{ + JobRejected: &armadaevents.JobRejected{ + Message: result.reason, + }, + }, + }, + }, + }, + } + } + events = append(events, es) } - if err := txn.Upsert(jobsToCancel); err != nil { + if err := txn.Upsert(jobsToUpdate); err != nil { return nil, err } - return events, nil } @@ -981,7 +1059,6 @@ func (s *Scheduler) initialise(ctx *armadacontext.Context) error { case <-ctx.Done(): return nil default: - // TODO(albin): This doesn't need to be separate; we'd anyway load everything in the first scheduling cycle. 
if _, _, err := s.syncState(ctx); err != nil { logging.WithStacktrace(ctx, err).Error("failed to initialise; trying again in 1 second") time.Sleep(1 * time.Second) diff --git a/internal/scheduler/scheduler_metrics.go b/internal/scheduler/scheduler_metrics.go index 844defb83f1..bc81d4c92c2 100644 --- a/internal/scheduler/scheduler_metrics.go +++ b/internal/scheduler/scheduler_metrics.go @@ -165,7 +165,7 @@ func aggregateJobContexts(previousSchedulingRoundData map[queuePriorityClassKey] for _, jctx := range jctxs { job := jctx.Job - key := queuePriorityClassKey{queue: job.GetQueue(), priorityClass: job.GetPriorityClassName()} + key := queuePriorityClassKey{queue: job.Queue(), priorityClass: job.PriorityClassName()} result[key] += 1 } diff --git a/internal/scheduler/scheduler_test.go b/internal/scheduler/scheduler_test.go index 5bc8f554793..d901b7420e3 100644 --- a/internal/scheduler/scheduler_test.go +++ b/internal/scheduler/scheduler_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/slices" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" apiconfig "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -24,7 +24,7 @@ import ( schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/database" schedulerdb "github.com/armadaproject/armada/internal/scheduler/database" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/kubernetesobjects/affinity" "github.com/armadaproject/armada/internal/scheduler/leader" @@ -116,21 +116,6 @@ var ( Version: 2, } schedulingInfoWithUpdatedPriorityBytes = protoutil.MustMarshall(schedulingInfoWithUpdatedPriority) - schedulingInfoWithQueueTtl = &schedulerobjects.JobSchedulingInfo{ - AtMostOnce: true, - ObjectRequirements: []*schedulerobjects.ObjectRequirements{ - { - Requirements: &schedulerobjects.ObjectRequirements_PodRequirements{ - PodRequirements: &schedulerobjects.PodRequirements{ - Priority: int32(10), - }, - }, - }, - }, - QueueTtlSeconds: 2, - Version: 1, - } - schedulingInfoWithQueueTtlBytes = protoutil.MustMarshall(schedulingInfoWithQueueTtl) schedulerMetrics = NewSchedulerMetrics(configuration.SchedulerMetricsConfig{ ScheduleCycleTimeHistogramSettings: configuration.HistogramConfig{ @@ -146,7 +131,7 @@ var ( }) ) -var queuedJob = testfixtures.JobDb.NewJob( +var queuedJob = testfixtures.NewJob( util.NewULID(), "testJobset", "testQueue", @@ -158,23 +143,10 @@ var queuedJob = testfixtures.JobDb.NewJob( false, false, 1, -) - -var queuedJobWithExpiredTtl = testfixtures.JobDb.NewJob( - util.NewULID(), - "testJobset", - "testQueue", - 0, - schedulingInfoWithQueueTtl, true, - 0, - false, - false, - false, - 1, ) -var leasedJob = testfixtures.JobDb.NewJob( +var leasedJob = testfixtures.NewJob( util.NewULID(), "testJobset", "testQueue", @@ -186,9 +158,10 @@ var leasedJob = testfixtures.JobDb.NewJob( false, false, 1, + true, ).WithNewRun("testExecutor", "test-node", "node", 5) -var preemptibleLeasedJob = testfixtures.JobDb.NewJob( +var preemptibleLeasedJob = testfixtures.NewJob( util.NewULID(), "testJobset", "testQueue", @@ -200,9 +173,10 @@ var preemptibleLeasedJob = testfixtures.JobDb.NewJob( false, false, 1, + true, 
).WithNewRun("testExecutor", "test-node", "node", 5) -var cancelledJob = testfixtures.JobDb.NewJob( +var cancelledJob = testfixtures.NewJob( util.NewULID(), "testJobset", "testQueue", @@ -214,9 +188,10 @@ false, true, 1, + true, ).WithNewRun("testExecutor", "test-node", "node", 5) -var returnedOnceLeasedJob = testfixtures.JobDb.NewJob( +var returnedOnceLeasedJob = testfixtures.NewJob( "01h3w2wtdchtc80hgyp782shrv", "testJobset", "testQueue", @@ -228,6 +203,7 @@ false, false, 1, + true, ).WithUpdatedRun(testfixtures.JobDb.CreateRun( uuid.New(), "01h3w2wtdchtc80hgyp782shrv", @@ -277,7 +253,7 @@ func defaultJobRunError(jobId string, runId uuid.UUID) *armadaevents.JobRunError } } -var leasedFailFastJob = testfixtures.JobDb.NewJob( +var leasedFailFastJob = testfixtures.NewJob( util.NewULID(), "testJobset", "testQueue", @@ -289,6 +265,7 @@ false, false, 1, + true, ).WithNewRun("testExecutor", "test-node", "node", 5) var ( @@ -297,7 +274,7 @@ testNodeId = api.NodeIdFromExecutorAndNodeName(testExecutor, testNode) scheduledAtPriority = int32(10) requeuedJobId = util.NewULID() - requeuedJob = testfixtures.JobDb.NewJob( + requeuedJob = testfixtures.NewJob( requeuedJobId, "testJobset", "testQueue", @@ -309,6 +286,7 @@ false, false, 1, + true, ).WithUpdatedRun(testfixtures.JobDb.CreateRun( uuid.New(), requeuedJobId, @@ -350,9 +328,9 @@ func TestScheduler_TestCycle(t *testing.T) { expectedJobRunLeased []string // ids of jobs we expect to have produced leased messages expectedJobRunErrors []string // ids of jobs we expect to have produced jobRunErrors messages expectedJobErrors []string // ids of jobs we expect to have produced jobErrors messages - expectedJobsToFail []string // ids of jobs we expect to fail without having failed the overall scheduling cycle expectedJobsRunsToPreempt []string // ids of jobs we expect to be preempted by the scheduler expectedJobRunPreempted []string // ids of jobs we expect to have produced jobRunPreempted messages + expectedJobRunCancelled []string // ids of jobs we expect to have produced jobRunCancelled messages expectedJobCancelled []string // ids of jobs we expect to have produced cancelled messages expectedJobRequestCancel []string // ids of jobs we expect to have produced request cancel expectedJobReprioritised []string // ids of jobs we expect to have produced reprioritised messages @@ -360,6 +338,7 @@ expectedJobSucceeded []string // ids of jobs we expect to have produced succeeeded messages expectedLeased []string // ids of jobs we expected to be leased in jobdb at the end of the cycle expectedRequeued []string // ids of jobs we expected to be requeued in jobdb at the end of the cycle + expectedValidated []string // ids of jobs we expect to have produced jobValidated messages expectedTerminal []string // ids of jobs we expected to be terminal in jobdb at the end of the cycle expectedJobPriority map[string]uint32 // expected priority of jobs at the end of the cycle expectedNodeAntiAffinities []string // list of nodes there is expected to be anti affinities for on job scheduling info @@ -372,6 +351,12 @@ expectedLeased: []string{queuedJob.Id()}, expectedQueuedVersion: 1, }, + "Submit check a job": { + initialJobs: []*jobdb.Job{queuedJob.WithValidated(false)}, + expectedQueued: []string{queuedJob.Id()}, +
expectedValidated: []string{queuedJob.Id()}, + expectedQueuedVersion: 0, + }, "Lease a single job from an update": { jobUpdates: []database.Job{ { @@ -383,6 +368,7 @@ func TestScheduler_TestCycle(t *testing.T) { SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), Serial: 1, + Validated: true, }, }, expectedJobRunLeased: []string{queuedJob.Id()}, @@ -400,6 +386,7 @@ func TestScheduler_TestCycle(t *testing.T) { SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), Serial: 1, + Validated: true, }, { JobID: "01h434g4hxww2pknb2q1nfmfph", @@ -410,6 +397,7 @@ func TestScheduler_TestCycle(t *testing.T) { SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), Serial: 1, + Validated: true, }, }, expectedJobRunLeased: []string{"01h3w2wtdchtc80hgyp782shrv", "01h434g4hxww2pknb2q1nfmfph"}, @@ -454,12 +442,6 @@ func TestScheduler_TestCycle(t *testing.T) { expectedQueued: []string{queuedJob.Id()}, expectedQueuedVersion: queuedJob.QueuedVersion(), }, - "FailedJobs in scheduler result will publish appropriate messages": { - initialJobs: []*jobdb.Job{queuedJob}, - expectedJobErrors: []string{queuedJob.Id()}, - expectedJobsToFail: []string{queuedJob.Id()}, - expectedTerminal: []string{queuedJob.Id()}, - }, "No updates to an already leased job": { initialJobs: []*jobdb.Job{leasedJob}, expectedLeased: []string{leasedJob.Id()}, @@ -481,6 +463,7 @@ func TestScheduler_TestCycle(t *testing.T) { SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), Serial: 1, + Validated: true, }, }, runUpdates: []database.Run{ @@ -541,7 +524,7 @@ func TestScheduler_TestCycle(t *testing.T) { // When a lease is returned and the run was attempted, a node anti affinity is added // If this node anti-affinity makes the job unschedulable, it should be failed. "Lease returned and failed": { - initialJobs: []*jobdb.Job{leasedJob}, + initialJobs: []*jobdb.Job{leasedJob.WithValidated(true)}, runUpdates: []database.Run{ { RunID: leasedJob.LatestRun().Id(), @@ -617,9 +600,10 @@ func TestScheduler_TestCycle(t *testing.T) { Serial: 1, }, }, - expectedJobCancelled: []string{leasedJob.Id()}, - expectedTerminal: []string{leasedJob.Id()}, - expectedQueuedVersion: leasedJob.QueuedVersion(), + expectedJobRunCancelled: []string{leasedJob.Id()}, + expectedJobCancelled: []string{leasedJob.Id()}, + expectedTerminal: []string{leasedJob.Id()}, + expectedQueuedVersion: leasedJob.QueuedVersion(), }, "Job Run preemption requested": { initialJobs: []*jobdb.Job{preemptibleLeasedJob}, @@ -654,81 +638,51 @@ func TestScheduler_TestCycle(t *testing.T) { expectedLeased: []string{leasedJob.Id()}, expectedQueuedVersion: leasedJob.QueuedVersion(), }, - "New job from postgres with expired queue ttl is cancel requested": { - jobUpdates: []database.Job{ - { - JobID: queuedJobWithExpiredTtl.Id(), - JobSet: queuedJobWithExpiredTtl.Jobset(), - Queue: queuedJobWithExpiredTtl.Queue(), - Queued: queuedJobWithExpiredTtl.Queued(), - QueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), - Serial: 1, - Submitted: queuedJobWithExpiredTtl.Created(), - SchedulingInfo: schedulingInfoWithQueueTtlBytes, - }, - }, - - // We expect to publish request cancel and cancelled message this cycle. - // The job should also be removed from the queue and set to a terminal state. 
- expectedJobRequestCancel: []string{queuedJobWithExpiredTtl.Id()}, - expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, - expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), - expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, - }, - "Existing jobDb job with expired queue ttl is cancel requested": { - initialJobs: []*jobdb.Job{queuedJobWithExpiredTtl}, - - // We expect to publish request cancel and cancelled message this cycle. - // The job should also be removed from the queue and set to a terminal state. - expectedJobRequestCancel: []string{queuedJobWithExpiredTtl.Id()}, - expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, - expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), - expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, - }, "New postgres job with cancel requested results in cancel messages": { jobUpdates: []database.Job{ { - JobID: queuedJobWithExpiredTtl.Id(), - JobSet: queuedJobWithExpiredTtl.Jobset(), - Queue: queuedJobWithExpiredTtl.Queue(), - Queued: queuedJobWithExpiredTtl.Queued(), - QueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + JobID: queuedJob.Id(), + JobSet: queuedJob.Jobset(), + Queue: queuedJob.Queue(), + Queued: queuedJob.Queued(), + QueuedVersion: queuedJob.QueuedVersion(), Serial: 1, - Submitted: queuedJobWithExpiredTtl.Created(), + Submitted: queuedJob.Created(), CancelRequested: true, Cancelled: false, - SchedulingInfo: schedulingInfoWithQueueTtlBytes, + SchedulingInfo: schedulingInfoBytes, }, }, // We have already got a request cancel from the DB, so only publish a cancelled message. // The job should also be removed from the queue and set to a terminal state.# - expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, - expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), - expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, + expectedJobCancelled: []string{queuedJob.Id()}, + expectedQueuedVersion: queuedJob.QueuedVersion(), + expectedTerminal: []string{queuedJob.Id()}, }, "Postgres job with cancel requested results in cancel messages": { - initialJobs: []*jobdb.Job{queuedJobWithExpiredTtl.WithCancelRequested(true)}, + initialJobs: []*jobdb.Job{queuedJob.WithCancelRequested(true)}, jobUpdates: []database.Job{ { - JobID: queuedJobWithExpiredTtl.Id(), - JobSet: queuedJobWithExpiredTtl.Jobset(), - Queue: queuedJobWithExpiredTtl.Queue(), - Queued: queuedJobWithExpiredTtl.Queued(), - QueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), + JobID: queuedJob.Id(), + JobSet: queuedJob.Jobset(), + Queue: queuedJob.Queue(), + Queued: queuedJob.Queued(), + QueuedVersion: queuedJob.QueuedVersion(), Serial: 1, - Submitted: queuedJobWithExpiredTtl.Created(), + Submitted: queuedJob.Created(), CancelRequested: true, Cancelled: false, - SchedulingInfo: schedulingInfoWithQueueTtlBytes, + SchedulingInfo: schedulingInfoBytes, + Priority: int64(queuedJob.Priority()), }, }, // We have already got a request cancel from the DB/existing job state, so only publish a cancelled message. // The job should also be removed from the queue and set to a terminal state. 
- expectedJobCancelled: []string{queuedJobWithExpiredTtl.Id()}, - expectedQueuedVersion: queuedJobWithExpiredTtl.QueuedVersion(), - expectedTerminal: []string{queuedJobWithExpiredTtl.Id()}, + expectedJobCancelled: []string{queuedJob.Id()}, + expectedQueuedVersion: queuedJob.QueuedVersion(), + expectedTerminal: []string{queuedJob.Id()}, }, "Queued job reprioritised": { initialJobs: []*jobdb.Job{queuedJob}, @@ -787,6 +741,12 @@ func TestScheduler_TestCycle(t *testing.T) { expectedTerminal: []string{leasedJob.Id()}, expectedQueuedVersion: leasedJob.QueuedVersion(), }, + "Submit check failed": { + initialJobs: []*jobdb.Job{queuedJob.WithValidated(false)}, + submitCheckerFailure: true, + expectedJobErrors: []string{queuedJob.Id()}, + expectedTerminal: []string{queuedJob.Id()}, + }, "Job failed": { initialJobs: []*jobdb.Job{leasedJob}, runUpdates: []database.Run{ @@ -865,7 +825,6 @@ func TestScheduler_TestCycle(t *testing.T) { schedulingAlgo := &testSchedulingAlgo{ jobsToSchedule: tc.expectedJobRunLeased, jobsToPreempt: tc.expectedJobsRunsToPreempt, - jobsToFail: tc.expectedJobsToFail, shouldError: tc.scheduleError, } publisher := &testPublisher{shouldError: tc.publishError} @@ -879,7 +838,7 @@ func TestScheduler_TestCycle(t *testing.T) { updateTimes: map[string]time.Time{"testExecutor": heartbeatTime}, } sched, err := NewScheduler( - testfixtures.NewJobDb(), + testfixtures.NewJobDb(testfixtures.TestResourceListFactory), jobRepo, clusterRepo, schedulingAlgo, @@ -921,11 +880,13 @@ func TestScheduler_TestCycle(t *testing.T) { fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobErrors{}): stringSet(tc.expectedJobErrors), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobRunErrors{}): stringSet(tc.expectedJobRunErrors), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobRunPreempted{}): stringSet(tc.expectedJobRunPreempted), + fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobRunCancelled{}): stringSet(tc.expectedJobRunCancelled), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_CancelledJob{}): stringSet(tc.expectedJobCancelled), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_ReprioritisedJob{}): stringSet(tc.expectedJobReprioritised), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobSucceeded{}): stringSet(tc.expectedJobSucceeded), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobRequeued{}): stringSet(tc.expectedRequeued), fmt.Sprintf("%T", &armadaevents.EventSequence_Event_CancelJob{}): stringSet(tc.expectedJobRequestCancel), + fmt.Sprintf("%T", &armadaevents.EventSequence_Event_JobValidated{}): stringSet(tc.expectedValidated), } err = subtractEventsFromOutstandingEventsByType(publisher.eventSequences, outstandingEventsByType) require.NoError(t, err) @@ -1041,7 +1002,7 @@ func TestRun(t *testing.T) { leaderController := leader.NewStandaloneLeaderController() submitChecker := &testSubmitChecker{checkSuccess: true} sched, err := NewScheduler( - testfixtures.NewJobDb(), + testfixtures.NewJobDb(testfixtures.TestResourceListFactory), &jobRepo, clusterRepo, schedulingAlgo, @@ -1076,7 +1037,7 @@ func TestRun(t *testing.T) { wg.Add(1) sched.onCycleCompleted = func() { wg.Done() } jobId := util.NewULID() - jobRepo.updatedJobs = []database.Job{{JobID: jobId, Queue: "testQueue", Queued: true}} + jobRepo.updatedJobs = []database.Job{{JobID: jobId, Queue: "testQueue", Queued: true, Validated: true}} schedulingAlgo.jobsToSchedule = []string{jobId} testClock.Step(10 * time.Second) wg.Wait() @@ -1122,6 +1083,7 @@ func TestScheduler_TestSyncState(t *testing.T) { Priority: 
int64(queuedJob.Priority()), SchedulingInfo: schedulingInfoBytes, Serial: 1, + Validated: true, }, }, expectedUpdatedJobs: []*jobdb.Job{queuedJob}, @@ -1218,7 +1180,7 @@ func TestScheduler_TestSyncState(t *testing.T) { { RunID: leasedJob.LatestRun().Id(), JobID: leasedJob.LatestRun().JobId(), - JobSet: leasedJob.GetJobSet(), + JobSet: leasedJob.Jobset(), Succeeded: true, }, }, @@ -1244,8 +1206,7 @@ func TestScheduler_TestSyncState(t *testing.T) { }, }, expectedUpdatedJobs: []*jobdb.Job{ - leasedJob. - WithJobSchedulingInfo(updatedSchedulingInfo). + jobdb.JobWithJobSchedulingInfo(leasedJob, updatedSchedulingInfo). WithQueued(true). WithQueuedVersion(3), }, @@ -1267,7 +1228,7 @@ func TestScheduler_TestSyncState(t *testing.T) { clusterRepo := &testExecutorRepository{} leaderController := leader.NewStandaloneLeaderController() sched, err := NewScheduler( - testfixtures.NewJobDb(), + testfixtures.NewJobDb(testfixtures.TestResourceListFactory), jobRepo, clusterRepo, schedulingAlgo, @@ -1288,7 +1249,7 @@ func TestScheduler_TestSyncState(t *testing.T) { // The SchedulingKeyGenerator embedded in the jobDb has some randomness, // which must be consistent within tests. - sched.jobDb = testfixtures.NewJobDb() + sched.jobDb = testfixtures.NewJobDb(testfixtures.TestResourceListFactory) // insert initial jobs txn := sched.jobDb.WriteTxn() @@ -1318,20 +1279,16 @@ type testSubmitChecker struct { checkSuccess bool } -func (t *testSubmitChecker) CheckApiJobs(_ *armadaevents.EventSequence, _ string) (bool, string) { - reason := "" - if !t.checkSuccess { - reason = "CheckApiJobs failed" - } - return t.checkSuccess, reason -} - -func (t *testSubmitChecker) CheckJobDbJobs(_ []*jobdb.Job) (bool, string) { - reason := "" - if !t.checkSuccess { - reason = "CheckJobDbJobs failed" +func (t *testSubmitChecker) Check(_ *armadacontext.Context, jobs []*jobdb.Job) (map[string]schedulingResult, error) { + result := make(map[string]schedulingResult) + for _, job := range jobs { + if t.checkSuccess { + result[job.Id()] = schedulingResult{isSchedulable: true} + } else { + result[job.Id()] = schedulingResult{isSchedulable: false, reason: "job not schedulable"} + } } - return t.checkSuccess, reason + return result, nil } // Test implementations of the interfaces needed by the Scheduler @@ -1398,7 +1355,6 @@ type testSchedulingAlgo struct { numberOfScheduleCalls int jobsToPreempt []string jobsToSchedule []string - jobsToFail []string shouldError bool // Set to true to indicate that preemption/scheduling/failure decisions have been persisted. // Until persisted is set to true, the same jobs are preempted/scheduled/failed on every call. 
@@ -1416,7 +1372,6 @@ func (t *testSchedulingAlgo) Schedule(_ *armadacontext.Context, txn *jobdb.Txn) } preemptedJobs := make([]*jobdb.Job, 0, len(t.jobsToPreempt)) scheduledJobs := make([]*jobdb.Job, 0, len(t.jobsToSchedule)) - failedJobs := make([]*jobdb.Job, 0, len(t.jobsToFail)) for _, id := range t.jobsToPreempt { job := txn.GetById(id) if job == nil { @@ -1453,27 +1408,13 @@ func (t *testSchedulingAlgo) Schedule(_ *armadacontext.Context, txn *jobdb.Txn) ) scheduledJobs = append(scheduledJobs, job) } - for _, id := range t.jobsToFail { - job := txn.GetById(id) - if job == nil { - return nil, errors.Errorf("was asked to lease %s but job does not exist", id) - } - if !job.Queued() { - return nil, errors.Errorf("was asked to lease %s but job is not queued", job.Id()) - } - job = job.WithQueued(false).WithFailed(true) - failedJobs = append(failedJobs, job) - } if err := txn.Upsert(preemptedJobs); err != nil { return nil, err } if err := txn.Upsert(scheduledJobs); err != nil { return nil, err } - if err := txn.Upsert(failedJobs); err != nil { - return nil, err - } - return NewSchedulerResultForTest(preemptedJobs, scheduledJobs, failedJobs, nil), nil + return NewSchedulerResultForTest(preemptedJobs, scheduledJobs, nil), nil } func (t *testSchedulingAlgo) Persist() { @@ -1485,16 +1426,14 @@ func (t *testSchedulingAlgo) Persist() { return } -func NewSchedulerResultForTest[S ~[]T, T interfaces.LegacySchedulerJob]( +func NewSchedulerResultForTest[S ~[]T, T *jobdb.Job]( preemptedJobs S, scheduledJobs S, - failedJobs S, nodeIdByJobId map[string]string, ) *SchedulerResult { return &SchedulerResult{ PreemptedJobs: schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, preemptedJobs), ScheduledJobs: schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, scheduledJobs), - FailedJobs: schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, failedJobs), NodeIdByJobId: nodeIdByJobId, } } @@ -1554,16 +1493,7 @@ var ( QueuedVersion: 0, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), - Serial: 0, - } - queuedJobWithTTLA = &database.Job{ - JobID: queuedJobA.JobID, - JobSet: "testJobSet", - Queue: "testQueue", - Queued: true, - QueuedVersion: 0, - SchedulingInfo: schedulingInfoWithQueueTtlBytes, - SchedulingInfoVersion: int32(schedulingInfoWithQueueTtl.Version), + Validated: true, Serial: 0, } queuedJobWithFailFastA = &database.Job{ @@ -1574,6 +1504,7 @@ var ( QueuedVersion: 0, SchedulingInfo: failFastSchedulingInfoBytes, SchedulingInfoVersion: int32(failFastSchedulingInfo.Version), + Validated: true, Serial: 0, } queuedJobWithUpdatedPriorityA = &database.Job{ @@ -1584,6 +1515,7 @@ var ( QueuedVersion: 0, SchedulingInfo: schedulingInfoWithUpdatedPriorityBytes, SchedulingInfoVersion: int32(schedulingInfoWithUpdatedPriority.Version), + Validated: true, Serial: 1, } runningJobA = &database.Job{ @@ -1593,6 +1525,7 @@ var ( QueuedVersion: 1, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, Serial: 0, } runningJobWithUpdatedPriorityA = &database.Job{ @@ -1602,6 +1535,7 @@ var ( QueuedVersion: 1, SchedulingInfo: schedulingInfoWithUpdatedPriorityBytes, SchedulingInfoVersion: int32(schedulingInfoWithUpdatedPriority.Version), + Validated: true, Serial: 1, } failedJobA = &database.Job{ @@ -1612,6 +1546,7 @@ var ( QueuedVersion: 0, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, 
Serial: 0, } cancelledJobA = &database.Job{ @@ -1622,6 +1557,7 @@ var ( QueuedVersion: 0, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, Serial: 0, } cancelRequestedJobA = &database.Job{ @@ -1633,6 +1569,7 @@ var ( QueuedVersion: 0, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, Serial: 0, } cancelByJobSetRequestedJobA = &database.Job{ @@ -1644,6 +1581,7 @@ var ( QueuedVersion: 0, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, Serial: 0, } runningCancelRequestedJobA = &database.Job{ @@ -1655,6 +1593,7 @@ var ( QueuedVersion: 1, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, Serial: 0, } runningCancelByJobSetRequestedJobA = &database.Job{ @@ -1665,6 +1604,7 @@ var ( QueuedVersion: 1, SchedulingInfo: schedulingInfoBytes, SchedulingInfoVersion: int32(schedulingInfo.Version), + Validated: true, Serial: 0, } newRunA = &database.Run{ @@ -1722,11 +1662,11 @@ var ( } ) -func jobDbJobFromDbJob(job *database.Job) *jobdb.Job { +func jobDbJobFromDbJob(resourceListFactory *internaltypes.ResourceListFactory, job *database.Job) *jobdb.Job { var schedulingInfo schedulerobjects.JobSchedulingInfo protoutil.MustUnmarshall(job.SchedulingInfo, &schedulingInfo) // Use a fresh jobDb instance to ensure run ids are consistent. - return testfixtures.NewJobDb().NewJob( + result, err := testfixtures.NewJobDb(resourceListFactory).NewJob( job.JobID, job.JobSet, job.Queue, @@ -1738,7 +1678,13 @@ func jobDbJobFromDbJob(job *database.Job) *jobdb.Job { job.CancelByJobsetRequested, job.Cancelled, 0, + job.Validated, + job.Pools, ) + if err != nil { + panic(err) + } + return result } // TestCycleConsistency runs two replicas of the scheduler and asserts that their state remains consistent @@ -1746,6 +1692,7 @@ func jobDbJobFromDbJob(job *database.Job) *jobdb.Job { // // TODO(albin): Test lease expiry. func TestCycleConsistency(t *testing.T) { + resourceListFactory := testfixtures.MakeTestResourceListFactory() type schedulerDbUpdate struct { jobUpdates []*database.Job // Job updates from the database. runUpdates []*database.Run // Run updates from the database. @@ -1760,7 +1707,6 @@ func TestCycleConsistency(t *testing.T) { // Controls which jobs the scheduler should schedule/preempt/fail. idsOfJobsToSchedule []string idsOfJobsToPreempt []string - idsOfJobsToFail []string // Expected jobDbs for scenario 1, i.e., the baseline scenario. // Only compared against if not nil. 
@@ -1773,6 +1719,9 @@ expectedEventSequencesCycleOne []*armadaevents.EventSequence expectedEventSequencesCycleTwo []*armadaevents.EventSequence expectedEventSequencesCycleThree []*armadaevents.EventSequence + + // If true then jobs will fail the submit check + failSubmitCheck bool }{ "Load a queued job": { firstSchedulerDbUpdate: schedulerDbUpdate{ @@ -1781,13 +1730,13 @@ }, }, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA), + jobDbJobFromDbJob(resourceListFactory, queuedJobA), }, expectedJobDbCycleTwo: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA), + jobDbJobFromDbJob(resourceListFactory, queuedJobA), }, expectedJobDbCycleThree: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA), + jobDbJobFromDbJob(resourceListFactory, queuedJobA), }, expectedEventSequencesCycleThree: make([]*armadaevents.EventSequence, 0), }, @@ -1820,7 +1769,7 @@ }, }, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(cancelRequestedJobA).WithQueued(false).WithCancelled(true), + jobDbJobFromDbJob(resourceListFactory, cancelRequestedJobA).WithQueued(false).WithCancelled(true), }, expectedJobDbCycleTwo: []*jobdb.Job{}, expectedJobDbCycleThree: []*jobdb.Job{}, @@ -1833,7 +1782,8 @@ Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_CancelledJob{ CancelledJob: &armadaevents.CancelledJob{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, }, }, }, @@ -1848,7 +1798,7 @@ }, }, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(cancelByJobSetRequestedJobA).WithQueued(false).WithCancelled(true), + jobDbJobFromDbJob(resourceListFactory, cancelByJobSetRequestedJobA).WithQueued(false).WithCancelled(true), }, expectedJobDbCycleTwo: []*jobdb.Job{}, expectedJobDbCycleThree: []*jobdb.Job{}, @@ -1861,7 +1811,8 @@ Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_CancelJob{ CancelJob: &armadaevents.CancelJob{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, }, }, }, @@ -1869,7 +1820,8 @@ Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_CancelledJob{ CancelledJob: &armadaevents.CancelledJob{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, }, }, }, @@ -1888,7 +1840,7 @@ }, expectedJobDbCycleOne: []*jobdb.Job{ func() *jobdb.Job { - job := jobDbJobFromDbJob(runningCancelRequestedJobA).WithCancelled(true).WithNewRun(testExecutor, testNodeId, testNode, 10) + job := jobDbJobFromDbJob(resourceListFactory, runningCancelRequestedJobA).WithCancelled(true).WithNewRun(testExecutor, testNodeId, testNode, 10) return job.WithUpdatedRun(job.LatestRun().WithCancelled(true).WithAttempted(true)) }(), }, @@ -1899,11 +1851,23 @@ Queue: queuedJobA.Queue, JobSetName: queuedJobA.JobSet, Events: []*armadaevents.EventSequence_Event{ + { + Created: pointerFromValue(time.Unix(0, 0)), + Event:
&armadaevents.EventSequence_Event_JobRunCancelled{ + JobRunCancelled: &armadaevents.JobRunCancelled{ + RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), + RunIdStr: testfixtures.UUIDFromInt(1).String(), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, + }, + }, + }, { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_CancelledJob{ CancelledJob: &armadaevents.CancelledJob{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, }, }, }, @@ -1922,7 +1886,7 @@ func TestCycleConsistency(t *testing.T) { }, expectedJobDbCycleOne: []*jobdb.Job{ func() *jobdb.Job { - job := jobDbJobFromDbJob(runningCancelByJobSetRequestedJobA).WithCancelled(true).WithNewRun(testExecutor, testNodeId, testNode, 10) + job := jobDbJobFromDbJob(resourceListFactory, runningCancelByJobSetRequestedJobA).WithCancelled(true).WithNewRun(testExecutor, testNodeId, testNode, 10) return job.WithUpdatedRun(job.LatestRun().WithCancelled(true).WithAttempted(true)) }(), }, @@ -1937,7 +1901,19 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_CancelJob{ CancelJob: &armadaevents.CancelJob{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, + }, + }, + }, + { + Created: pointerFromValue(time.Unix(0, 0)), + Event: &armadaevents.EventSequence_Event_JobRunCancelled{ + JobRunCancelled: &armadaevents.JobRunCancelled{ + RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, + RunIdStr: testfixtures.UUIDFromInt(1).String(), }, }, }, @@ -1945,7 +1921,8 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_CancelledJob{ CancelledJob: &armadaevents.CancelledJob{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, }, }, }, @@ -1961,13 +1938,13 @@ func TestCycleConsistency(t *testing.T) { }, idsOfJobsToSchedule: []string{queuedJobA.JobID}, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedJobDbCycleTwo: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedJobDbCycleThree: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedEventSequencesCycleThree: []*armadaevents.EventSequence{ { @@ -1979,7 +1956,9 @@ func TestCycleConsistency(t *testing.T) { Event: &armadaevents.EventSequence_Event_JobRunLeased{ JobRunLeased: 
&armadaevents.JobRunLeased{ RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), + RunIdStr: testfixtures.UUIDFromInt(1).String(), JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, ExecutorId: testExecutor, NodeId: testNode, UpdateSequenceNumber: 1, @@ -1993,44 +1972,6 @@ func TestCycleConsistency(t *testing.T) { }, }, }, - "Fail a new job": { - firstSchedulerDbUpdate: schedulerDbUpdate{ - jobUpdates: []*database.Job{ - queuedJobA, - }, - }, - idsOfJobsToFail: []string{queuedJobA.JobID}, - expectedJobDbCycleOne: []*jobdb.Job{}, - expectedJobDbCycleTwo: []*jobdb.Job{}, - expectedJobDbCycleThree: []*jobdb.Job{}, - expectedEventSequencesCycleThree: []*armadaevents.EventSequence{ - { - Queue: queuedJobA.Queue, - JobSetName: queuedJobA.JobSet, - Events: []*armadaevents.EventSequence_Event{ - { - Created: pointerFromValue(time.Unix(0, 0)), - Event: &armadaevents.EventSequence_Event_JobErrors{ - JobErrors: &armadaevents.JobErrors{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), - Errors: []*armadaevents.Error{ - { - Terminal: true, - Reason: &armadaevents.Error_GangJobUnschedulable{ - GangJobUnschedulable: &armadaevents.GangJobUnschedulable{ - // This message is somewhat arbitrary here. - Message: "Job did not meet the minimum gang cardinality", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, "Schedule a job that then succeeds": { firstSchedulerDbUpdate: schedulerDbUpdate{ jobUpdates: []*database.Job{ @@ -2044,11 +1985,11 @@ func TestCycleConsistency(t *testing.T) { }, idsOfJobsToSchedule: []string{queuedJobA.JobID}, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedJobDbCycleTwo: []*jobdb.Job{ func() *jobdb.Job { - job := jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithSucceeded(true) + job := jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithSucceeded(true) return job.WithUpdatedRun(job.LatestRun().WithSucceeded(true).WithAttempted(true)) }(), }, @@ -2064,6 +2005,8 @@ func TestCycleConsistency(t *testing.T) { JobRunLeased: &armadaevents.JobRunLeased{ RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, + RunIdStr: testfixtures.UUIDFromInt(1).String(), ExecutorId: testExecutor, NodeId: testNode, UpdateSequenceNumber: 1, @@ -2083,7 +2026,8 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_JobSucceeded{ JobSucceeded: &armadaevents.JobSucceeded{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, }, }, }, @@ -2107,11 +2051,11 @@ func TestCycleConsistency(t *testing.T) { }, idsOfJobsToSchedule: []string{queuedJobA.JobID}, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, 
queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedJobDbCycleTwo: []*jobdb.Job{ func() *jobdb.Job { - job := jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithFailed(true) + job := jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithFailed(true) return job.WithUpdatedRun(job.LatestRun().WithFailed(true).WithAttempted(true)) }(), }, @@ -2127,6 +2071,8 @@ func TestCycleConsistency(t *testing.T) { JobRunLeased: &armadaevents.JobRunLeased{ RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, + RunIdStr: testfixtures.UUIDFromInt(1).String(), ExecutorId: testExecutor, NodeId: testNode, UpdateSequenceNumber: 1, @@ -2146,7 +2092,8 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_JobErrors{ JobErrors: &armadaevents.JobErrors{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, Errors: []*armadaevents.Error{ { Terminal: true, @@ -2180,11 +2127,11 @@ func TestCycleConsistency(t *testing.T) { }, idsOfJobsToSchedule: []string{queuedJobA.JobID}, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedJobDbCycleTwo: []*jobdb.Job{ func() *jobdb.Job { - job := jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithFailed(true) + job := jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithFailed(true) // TODO(albin): RunAttempted is implicitly set to true for failed runs with error other than PodLeaseReturned. // See func (c *InstructionConverter) handleJobRunErrors. 
return job.WithUpdatedRun(job.LatestRun().WithFailed(true).WithAttempted(true)) @@ -2208,15 +2155,16 @@ func TestCycleConsistency(t *testing.T) { }, idsOfJobsToSchedule: []string{queuedJobA.JobID}, expectedJobDbCycleOne: []*jobdb.Job{ - jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), + jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10), }, expectedJobDbCycleTwo: []*jobdb.Job{ func() *jobdb.Job { - job := jobDbJobFromDbJob(queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithFailed(true) + job := jobDbJobFromDbJob(resourceListFactory, queuedJobA).WithQueued(false).WithQueuedVersion(1).WithNewRun(testExecutor, testNodeId, testNode, 10).WithFailed(true) return job.WithUpdatedRun(job.LatestRun().WithFailed(true).WithAttempted(true).WithReturned(true)) }(), }, expectedJobDbCycleThree: []*jobdb.Job{}, + failSubmitCheck: true, }, "Schedule a new job that then fails to start with fail-fast set to true": { firstSchedulerDbUpdate: schedulerDbUpdate{ @@ -2255,8 +2203,10 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_JobRunPreempted{ JobRunPreempted: &armadaevents.JobRunPreempted{ - PreemptedRunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), - PreemptedJobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + PreemptedRunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), + PreemptedRunIdStr: testfixtures.UUIDFromInt(1).String(), + PreemptedJobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + PreemptedJobIdStr: queuedJobA.JobID, }, }, }, @@ -2264,8 +2214,10 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_JobRunErrors{ JobRunErrors: &armadaevents.JobRunErrors{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), - RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, + RunId: armadaevents.ProtoUuidFromUuid(testfixtures.UUIDFromInt(1)), + RunIdStr: testfixtures.UUIDFromInt(1).String(), Errors: []*armadaevents.Error{ { Terminal: true, @@ -2281,7 +2233,8 @@ func TestCycleConsistency(t *testing.T) { Created: pointerFromValue(time.Unix(0, 0)), Event: &armadaevents.EventSequence_Event_JobErrors{ JobErrors: &armadaevents.JobErrors{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), + JobIdStr: queuedJobA.JobID, Errors: []*armadaevents.Error{ { Terminal: true, @@ -2351,13 +2304,6 @@ func TestCycleConsistency(t *testing.T) { }, }, }, - "Queued job with expired ttl results in cancellation": { - firstSchedulerDbUpdate: schedulerDbUpdate{ - jobUpdates: []*database.Job{ - queuedJobWithTTLA, - }, - }, - }, "Queued job is re-prioritised": { firstSchedulerDbUpdate: schedulerDbUpdate{ jobUpdates: []*database.Job{ @@ -2411,7 +2357,7 @@ func TestCycleConsistency(t *testing.T) { // Helper function for creating new schedulers for use in tests. 
newScheduler := func(db *pgxpool.Pool) *Scheduler { scheduler, err := NewScheduler( - testfixtures.NewJobDb(), + testfixtures.NewJobDb(resourceListFactory), database.NewPostgresJobRepository(db, 1024), &testExecutorRepository{ updateTimes: map[string]time.Time{"test-executor": testClock.Now()}, @@ -2419,11 +2365,12 @@ func TestCycleConsistency(t *testing.T) { &testSchedulingAlgo{ jobsToSchedule: tc.idsOfJobsToSchedule, jobsToPreempt: tc.idsOfJobsToPreempt, - jobsToFail: tc.idsOfJobsToFail, }, leader.NewStandaloneLeaderController(), newTestPublisher(), - &testSubmitChecker{}, + &testSubmitChecker{ + checkSuccess: !tc.failSubmitCheck, + }, 1*time.Second, 5*time.Second, 0, diff --git a/internal/scheduler/schedulerapp.go b/internal/scheduler/schedulerapp.go index 95ef48d9022..801aa1c596c 100644 --- a/internal/scheduler/schedulerapp.go +++ b/internal/scheduler/schedulerapp.go @@ -10,14 +10,15 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/google/uuid" + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/redis/go-redis/v9" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/common" "github.com/armadaproject/armada/internal/common/app" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -36,12 +37,16 @@ import ( schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/failureestimator" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/leader" "github.com/armadaproject/armada/internal/scheduler/metrics" "github.com/armadaproject/armada/internal/scheduler/quarantine" + "github.com/armadaproject/armada/internal/scheduler/queue" "github.com/armadaproject/armada/internal/scheduler/reports" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/pkg/api" + "github.com/armadaproject/armada/pkg/client" "github.com/armadaproject/armada/pkg/executorapi" ) @@ -70,6 +75,15 @@ func Run(config schedulerconfig.Configuration) error { shutdownHttpServer := common.ServeHttp(uint16(config.Http.Port), mux) defer shutdownHttpServer() + // //////////////////////////////////////////////////////////////////////// + // Resource list factory + // //////////////////////////////////////////////////////////////////////// + resourceListFactory, err := internaltypes.MakeResourceListFactory(config.Scheduling.SupportedResourceTypes) + if err != nil { + return errors.WithMessage(err, "Error with the .scheduling.supportedResourceTypes field in config") + } + ctx.Infof("Supported resource types: %s", resourceListFactory.SummaryString()) + // List of services to run concurrently. // Because we want to start services only once all input validation has been completed, // we add all services to a slice and start them together at the end of this function. 
@@ -87,17 +101,24 @@ func Run(config schedulerconfig.Configuration) error { jobRepository := database.NewPostgresJobRepository(db, int32(config.DatabaseFetchSize)) executorRepository := database.NewPostgresExecutorRepository(db) - redisClient := redis.NewUniversalClient(config.Redis.AsUniversalOptions()) + // //////////////////////////////////////////////////////////////////////// + // Queue Cache + // //////////////////////////////////////////////////////////////////////// + conn, err := client.CreateApiConnection(&config.ArmadaApi) + if err != nil { + return errors.WithMessage(err, "error creating armada api client") + } defer func() { - err := redisClient.Close() + err := conn.Close() if err != nil { logging. WithStacktrace(ctx, err). - Warnf("Redis client didn't close down cleanly") + Warnf("Armada api client didn't close down cleanly") } }() - queueRepository := repository.NewRedisQueueRepository(redisClient) - legacyExecutorRepository := database.NewRedisExecutorRepository(redisClient, "pulsar") + armadaClient := api.NewSubmitClient(conn) + queueCache := queue.NewQueueCache(armadaClient, config.QueueRefreshPeriod) + services = append(services, func() error { return queueCache.Run(ctx) }) // //////////////////////////////////////////////////////////////////////// // Pulsar @@ -147,7 +168,7 @@ if err != nil { return errors.WithMessage(err, "error creating auth services") } - grpcServer := grpcCommon.CreateGrpcServer(config.Grpc.KeepaliveParams, config.Grpc.KeepaliveEnforcementPolicy, authServices, config.Grpc.Tls) + grpcServer := grpcCommon.CreateGrpcServer(config.Grpc.KeepaliveParams, config.Grpc.KeepaliveEnforcementPolicy, authServices, config.Grpc.Tls, createLogrusLoggingOption()) defer grpcServer.GracefulStop() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", config.Grpc.Port)) if err != nil { @@ -157,7 +178,6 @@ apiProducer, jobRepository, executorRepository, - legacyExecutorRepository, types.AllowedPriorities(config.Scheduling.PriorityClasses), config.Scheduling.NodeIdLabel, config.Scheduling.PriorityClassNameOverride, @@ -189,16 +209,23 @@ // //////////////////////////////////////////////////////////////////////// ctx.Infof("setting up scheduling loop") - submitChecker := NewSubmitChecker( - 30*time.Minute, - config.Scheduling, - executorRepository, - ) - services = append(services, func() error { - return submitChecker.Run(ctx) - }) - if err != nil { - return errors.WithMessage(err, "error creating submit checker") + var submitChecker SubmitScheduleChecker + if !config.DisableSubmitCheck { + submitCheckerImpl := NewSubmitChecker( + config.Scheduling, + executorRepository, + resourceListFactory, + ) + services = append(services, func() error { + return submitCheckerImpl.Run(ctx) + }) + if err != nil { + return errors.WithMessage(err, "error creating submit checker") + } + submitChecker = submitCheckerImpl + } else { + ctx.Infof("DisableSubmitCheck is true, will use a dummy submit check") + submitChecker = &DummySubmitChecker{} } // Setup failure estimation and quarantining.
@@ -247,11 +274,12 @@ func Run(config schedulerconfig.Configuration) error { config.Scheduling, config.MaxSchedulingDuration, executorRepository, - queueRepository, + queueCache, schedulingContextRepository, nodeQuarantiner, queueQuarantiner, stringInterner, + resourceListFactory, ) if err != nil { return errors.WithMessage(err, "error creating scheduling algo") @@ -260,6 +288,7 @@ config.Scheduling.PriorityClasses, config.Scheduling.DefaultPriorityClassName, stringInterner, + resourceListFactory, ) schedulingRoundMetrics := NewSchedulerMetrics(config.Metrics.Metrics) if err := prometheus.Register(schedulingRoundMetrics); err != nil { @@ -301,13 +330,10 @@ // //////////////////////////////////////////////////////////////////////// // Metrics // //////////////////////////////////////////////////////////////////////// - poolAssigner, err := NewPoolAssigner(config.Scheduling.ExecutorTimeout, config.Scheduling, executorRepository) - if err != nil { - return errors.WithMessage(err, "error creating pool assigner") - } + poolAssigner := NewPoolAssigner(executorRepository) metricsCollector := NewMetricsCollector( scheduler.jobDb, - queueRepository, + queueCache, executorRepository, poolAssigner, config.Metrics.RefreshInterval, @@ -366,3 +392,20 @@ func loadClusterConfig(ctx *armadacontext.Context) (*rest.Config, error) { ctx.Info("Running with in cluster client configuration") return config, err } + +// This changes the default logrus grpc logging to log OK messages at trace level +// The reasons for doing this are: +// - Reduced logging +// - We only care about failures, so let's only log failures +// - We normally use these logs to work out who is calling us; however, the Executor API is not public +// and is only called by other Armada components +func createLogrusLoggingOption() grpc_logrus.Option { + return grpc_logrus.WithLevels(func(code codes.Code) log.Level { + switch code { + case codes.OK: + return log.TraceLevel + default: + return grpc_logrus.DefaultCodeToLevel(code) + } + }) +} diff --git a/internal/scheduler/schedulerobjects/podutils_test.go b/internal/scheduler/schedulerobjects/podutils_test.go index f4819f3e59f..a1de25d2c2b 100644 --- a/internal/scheduler/schedulerobjects/podutils_test.go +++ b/internal/scheduler/schedulerobjects/podutils_test.go @@ -213,14 +213,14 @@ func getBenchmarkJobSchedulingSchedulingInfo() *JobSchedulingInfo { PreemptionPolicy: "abc", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("2"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -323,14 +323,14 @@ func getBenchmarkJobSchedulingSchedulingInfoWithAffinity() *JobSchedulingInfo { PreemptionPolicy: "abc", ResourceRequirements: v1.ResourceRequirements{ Limits: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("3"), + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu":
resource.MustParse("3"), }, Requests: map[v1.ResourceName]resource.Quantity{ - "cpu": resource.MustParse("2"), - "memory": resource.MustParse("2"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("2"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, diff --git a/internal/scheduler/schedulerobjects/resourcelist.go b/internal/scheduler/schedulerobjects/resourcelist.go index a72bc4c63f4..d8b215e42b9 100644 --- a/internal/scheduler/schedulerobjects/resourcelist.go +++ b/internal/scheduler/schedulerobjects/resourcelist.go @@ -278,19 +278,6 @@ func (a ResourceList) IsStrictlyNonNegative() bool { return true } -// IsStrictlyLessOrEqual returns false if -// - there is a quantity in b greater than that in a or -// - there is a non-zero quantity in b not in a -// and true otherwise. -func (a ResourceList) IsStrictlyLessOrEqual(b ResourceList) bool { - for t, q := range b.Resources { - if q.Cmp(a.Get(t)) == -1 { - return false - } - } - return true -} - func (rl ResourceList) CompactString() string { var sb strings.Builder sb.WriteString("{") @@ -365,14 +352,6 @@ func (m AllocatableByPriorityAndResourceType) MarkAllocatable(p int32, rs Resour } } -func (m AllocatableByPriorityAndResourceType) MarkAllocatedV1ResourceList(p int32, rs v1.ResourceList) { - for priority, allocatableResourcesAtPriority := range m { - if priority <= p { - allocatableResourcesAtPriority.SubV1ResourceList(rs) - } - } -} - func (m AllocatableByPriorityAndResourceType) MarkAllocatableV1ResourceList(p int32, rs v1.ResourceList) { for priority, allocatableResourcesAtPriority := range m { if priority <= p { diff --git a/internal/scheduler/schedulerobjects/resourcelist_test.go b/internal/scheduler/schedulerobjects/resourcelist_test.go index f744686ea66..c3c989880ba 100644 --- a/internal/scheduler/schedulerobjects/resourcelist_test.go +++ b/internal/scheduler/schedulerobjects/resourcelist_test.go @@ -449,8 +449,8 @@ func TestAllocatableByPriorityAndResourceType(t *testing.T) { UsedAtPriority: 1, Resources: ResourceList{ Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -459,8 +459,8 @@ func TestAllocatableByPriorityAndResourceType(t *testing.T) { UsedAtPriority: 5, Resources: ResourceList{ Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -469,8 +469,8 @@ func TestAllocatableByPriorityAndResourceType(t *testing.T) { UsedAtPriority: 10, Resources: ResourceList{ Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, }, @@ -514,24 +514,24 @@ func TestAllocatedByPriorityAndResourceType(t *testing.T) { Priorities: []int32{1, 5, 10}, UsedAtPriority: 1, Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, "mid priority": { Priorities: []int32{1, 5, 10}, UsedAtPriority: 5, Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, "highest priority": { Priorities: []int32{1, 5, 10}, 
UsedAtPriority: 10, Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "gpu": resource.MustParse("2"), + "cpu": resource.MustParse("1"), + "nvidia.com/gpu": resource.MustParse("2"), }, }, } @@ -744,139 +744,6 @@ func TestResourceListIsStrictlyNonNegative(t *testing.T) { } } -func TestResourceListIsStrictlyLessOrEqual(t *testing.T) { - tests := map[string]struct { - a ResourceList - b ResourceList - expected bool - }{ - "both empty": { - a: ResourceList{}, - b: ResourceList{}, - expected: true, - }, - "both empty maps": { - a: ResourceList{ - Resources: make(map[string]resource.Quantity), - }, - b: ResourceList{ - Resources: make(map[string]resource.Quantity), - }, - expected: true, - }, - "one empty map": { - a: ResourceList{ - Resources: make(map[string]resource.Quantity), - }, - b: ResourceList{}, - expected: true, - }, - "zero equals empty": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("0"), - }, - }, - b: ResourceList{}, - expected: true, - }, - "zero and missing is equal": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("0"), - }, - }, - b: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - }, - }, - expected: true, - }, - "simple equal": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "foo": resource.MustParse("3"), - }, - }, - b: ResourceList{ - Resources: map[string]resource.Quantity{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2"), - "foo": resource.MustParse("3"), - }, - }, - expected: true, - }, - "simple true": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("2"), - }, - }, - b: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("3"), - }, - }, - expected: true, - }, - "simple false": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("3"), - }, - }, - b: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("2"), - }, - }, - expected: false, - }, - "present in a missing in b true": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("2"), - }, - }, - b: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - }, - }, - expected: true, - }, - "missing in a present in b true": { - a: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - }, - }, - b: ResourceList{ - Resources: map[string]resource.Quantity{ - "foo": resource.MustParse("1"), - "bar": resource.MustParse("2"), - }, - }, - expected: true, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.expected, tc.a.IsStrictlyLessOrEqual(tc.b)) - }) - } -} - func TestResourceListZero(t *testing.T) { rl := ResourceList{ Resources: map[string]resource.Quantity{ diff --git a/internal/scheduler/schedulerobjects/schedulerobjects.pb.go b/internal/scheduler/schedulerobjects/schedulerobjects.pb.go index 7c985485f5f..52e9af1b4c6 100644 --- a/internal/scheduler/schedulerobjects/schedulerobjects.pb.go +++ b/internal/scheduler/schedulerobjects/schedulerobjects.pb.go @@ 
-630,8 +630,6 @@ type JobSchedulingInfo struct { // Kubernetes objects that make up this job and their respective scheduling requirements. ObjectRequirements []*ObjectRequirements `protobuf:"bytes,5,rep,name=object_requirements,json=objectRequirements,proto3" json:"objectRequirements,omitempty"` Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - QueueTtlSeconds int64 `protobuf:"varint,10,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` } func (m *JobSchedulingInfo) Reset() { *m = JobSchedulingInfo{} } @@ -726,13 +724,6 @@ func (m *JobSchedulingInfo) GetVersion() uint32 { return 0 } -func (m *JobSchedulingInfo) GetQueueTtlSeconds() int64 { - if m != nil { - return m.QueueTtlSeconds - } - return 0 -} - // Message capturing the scheduling requirements of a particular Kubernetes object. type ObjectRequirements struct { // Types that are valid to be assigned to Requirements: @@ -1007,142 +998,141 @@ func init() { } var fileDescriptor_97dadc5fbd620721 = []byte{ - // 2160 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x39, 0xcd, 0x6f, 0x1b, 0xc7, - 0xf5, 0x5a, 0x7d, 0x92, 0x8f, 0xb2, 0x44, 0x8d, 0x2c, 0x7b, 0x45, 0xdb, 0x5c, 0x85, 0xf1, 0x2f, - 0x50, 0x7e, 0x71, 0xc8, 0x46, 0x29, 0x50, 0xc3, 0xed, 0x45, 0xb4, 0xd4, 0x9a, 0xaa, 0x4d, 0xc9, - 0x4b, 0xa9, 0x45, 0x0b, 0x34, 0x8b, 0x25, 0x77, 0x44, 0x6f, 0xb4, 0x9c, 0xa1, 0x77, 0x67, 0xdd, - 0x30, 0xe7, 0xf6, 0x50, 0x04, 0x48, 0x83, 0xa2, 0x1f, 0x01, 0x0a, 0xb4, 0xc8, 0xad, 0xe7, 0x1e, - 0xda, 0x43, 0xff, 0x01, 0x1f, 0x73, 0xec, 0x89, 0x29, 0xec, 0x1b, 0xaf, 0xfd, 0x07, 0x8a, 0x9d, - 0xd9, 0xe5, 0x0e, 0x77, 0x49, 0x51, 0x4e, 0xea, 0xea, 0x44, 0xce, 0xfb, 0x9e, 0xf7, 0xde, 0xbc, - 0x79, 0x6f, 0x16, 0xee, 0xd9, 0x84, 0x61, 0x97, 0x98, 0x4e, 0xc5, 0x6b, 0x3d, 0xc1, 0x96, 0xef, - 0x60, 0x37, 0xfe, 0x47, 0x9b, 0x1f, 0xe2, 0x16, 0xf3, 0x52, 0x80, 0x72, 0xd7, 0xa5, 0x8c, 0xa2, - 0x7c, 0x12, 0x5e, 0xd0, 0xda, 0x94, 0xb6, 0x1d, 0x5c, 0xe1, 0xf8, 0xa6, 0x7f, 0x5a, 0x61, 0x76, - 0x07, 0x7b, 0xcc, 0xec, 0x74, 0x05, 0x4b, 0xa1, 0x74, 0x76, 0xd7, 0x2b, 0xdb, 0xb4, 0x62, 0x76, - 0xed, 0x4a, 0x8b, 0xba, 0xb8, 0xf2, 0xec, 0xbd, 0x4a, 0x1b, 0x13, 0xec, 0x9a, 0x0c, 0x5b, 0x21, - 0xcd, 0xb7, 0x63, 0x9a, 0x8e, 0xd9, 0x7a, 0x62, 0x13, 0xec, 0xf6, 0x2a, 0xdd, 0xb3, 0x36, 0x67, - 0x72, 0xb1, 0x47, 0x7d, 0xb7, 0x85, 0x53, 0x5c, 0xef, 0xb6, 0x6d, 0xf6, 0xc4, 0x6f, 0x96, 0x5b, - 0xb4, 0x53, 0x69, 0xd3, 0x36, 0x8d, 0x6d, 0x08, 0x56, 0x7c, 0xc1, 0xff, 0x09, 0xf2, 0xd2, 0x5f, - 0xe6, 0x20, 0xb3, 0xff, 0x11, 0x6e, 0xf9, 0x8c, 0xba, 0x68, 0x0b, 0x66, 0x6d, 0x4b, 0x55, 0xb6, - 0x94, 0xed, 0x6c, 0x35, 0x3f, 0xe8, 0x6b, 0xcb, 0xb6, 0x75, 0x87, 0x76, 0x6c, 0x86, 0x3b, 0x5d, - 0xd6, 0xd3, 0x67, 0x6d, 0x0b, 0xbd, 0x05, 0xf3, 0x5d, 0x4a, 0x1d, 0x75, 0x96, 0xd3, 0xa0, 0x41, - 0x5f, 0x5b, 0x09, 0xd6, 0x12, 0x15, 0xc7, 0xa3, 0x5d, 0x58, 0x20, 0xd4, 0xc2, 0x9e, 0x3a, 0xb7, - 0x35, 0xb7, 0x9d, 0xdb, 0xb9, 0x56, 0x4e, 0xb9, 0xae, 0x4e, 0x2d, 0x5c, 0x5d, 0x1f, 0xf4, 0xb5, - 0x55, 0x4e, 0x28, 0x49, 0x10, 0x9c, 0xe8, 0x03, 0x58, 0xe9, 0xd8, 0xc4, 0xee, 0xf8, 0x9d, 0x03, - 0xda, 0x6c, 0xd8, 0x1f, 0x63, 0x75, 0x7e, 0x4b, 0xd9, 0xce, 0xed, 0x14, 0xd3, 0xb2, 0xf4, 0xd0, - 0x19, 0x0f, 0x6d, 0x8f, 0x55, 0xaf, 0x3d, 0xef, 0x6b, 0x33, 0x81, 0x61, 0xa3, 0xdc, 0x7a, 0x62, - 0x1d, 0xc8, 0x77, 0x4c, 0x8f, 0x9d, 0x74, 0x2d, 0x93, 
0xe1, 0x63, 0xbb, 0x83, 0xd5, 0x05, 0x2e, - 0xbf, 0x50, 0x16, 0xc1, 0x2b, 0x47, 0x8e, 0x2b, 0x1f, 0x47, 0xc1, 0xab, 0x16, 0x22, 0xd9, 0xa3, - 0x9c, 0x9f, 0x7d, 0xa5, 0x29, 0x7a, 0x02, 0x86, 0x0e, 0x61, 0xdd, 0x27, 0xa6, 0xe7, 0xd9, 0x6d, - 0x82, 0x2d, 0xe3, 0x43, 0xda, 0x34, 0x5c, 0x9f, 0x78, 0x6a, 0x76, 0x6b, 0x6e, 0x3b, 0x5b, 0xd5, - 0x06, 0x7d, 0xed, 0x46, 0x8c, 0x3e, 0xa0, 0x4d, 0xdd, 0x27, 0xb2, 0x13, 0xd6, 0x52, 0xc8, 0xd2, - 0x5f, 0x37, 0x60, 0x3e, 0xf0, 0xda, 0xc5, 0xc2, 0x44, 0xcc, 0x0e, 0x56, 0x97, 0xe3, 0x30, 0x05, - 0x6b, 0x39, 0x4c, 0xc1, 0x1a, 0xed, 0x40, 0x06, 0x87, 0xc1, 0x57, 0xd7, 0x39, 0xed, 0xb5, 0x41, - 0x5f, 0x43, 0x11, 0x4c, 0xa2, 0x1f, 0xd2, 0xa1, 0x47, 0x90, 0x0d, 0x76, 0x6a, 0x78, 0x18, 0x13, - 0x9e, 0x07, 0xe7, 0xbb, 0xec, 0x6a, 0xe8, 0xb2, 0x4c, 0xc0, 0xd4, 0xc0, 0x98, 0x70, 0x67, 0x0d, - 0x57, 0xe8, 0x10, 0xb2, 0x41, 0xbc, 0x0d, 0xd6, 0xeb, 0x62, 0x75, 0x2e, 0x14, 0x37, 0x36, 0x5b, - 0x8e, 0x7b, 0x5d, 0x2c, 0xec, 0x23, 0xe1, 0x4a, 0xb6, 0x2f, 0x82, 0xa1, 0x5d, 0x58, 0x64, 0xa6, - 0x4d, 0x98, 0xa7, 0x2e, 0xf0, 0xdc, 0xdb, 0x2c, 0x8b, 0x73, 0x54, 0x36, 0xbb, 0x76, 0x39, 0x38, - 0x6b, 0xe5, 0x67, 0xef, 0x95, 0x8f, 0x03, 0x8a, 0xea, 0x4a, 0x68, 0x5b, 0xc8, 0xa0, 0x87, 0xbf, - 0xe8, 0x08, 0x16, 0x1d, 0xb3, 0x89, 0x1d, 0x4f, 0x5d, 0xe4, 0x22, 0x4a, 0xe3, 0x0d, 0x2a, 0x3f, - 0xe4, 0x44, 0xfb, 0x84, 0xb9, 0xbd, 0xea, 0xd5, 0x41, 0x5f, 0xcb, 0x0b, 0x2e, 0xc9, 0xac, 0x50, - 0x0e, 0x32, 0x60, 0x95, 0x51, 0x66, 0x3a, 0x46, 0x74, 0x6e, 0x3d, 0x75, 0xe9, 0xd5, 0xb2, 0x99, - 0xb3, 0x47, 0x28, 0x4f, 0x4f, 0xac, 0xd1, 0xdf, 0x14, 0xb8, 0x6d, 0x3a, 0x0e, 0x6d, 0x99, 0xcc, - 0x6c, 0x3a, 0xd8, 0x68, 0xf6, 0x8c, 0xae, 0x6b, 0x53, 0xd7, 0x66, 0x3d, 0xc3, 0x24, 0xd6, 0x50, - 0xaf, 0x9a, 0xe1, 0x3b, 0xfa, 0xde, 0x84, 0x1d, 0xed, 0xc6, 0x22, 0xaa, 0xbd, 0xa3, 0x50, 0xc0, - 0x2e, 0xb1, 0x22, 0x45, 0x62, 0xaf, 0xdb, 0xa1, 0x51, 0x5b, 0xe6, 0x14, 0x72, 0x7d, 0x2a, 0x05, - 0x72, 0x61, 0xdd, 0x63, 0x26, 0xe3, 0x16, 0x87, 0x87, 0xc4, 0xb0, 0x2d, 0x7e, 0x4c, 0x72, 0x3b, - 0xef, 0x4c, 0x30, 0xb3, 0x11, 0x70, 0x54, 0x7b, 0xe2, 0x64, 0xd4, 0x2c, 0x61, 0xd5, 0xf5, 0xd0, - 0xaa, 0x55, 0x6f, 0x14, 0xab, 0x27, 0x01, 0xc8, 0x87, 0xf5, 0xd0, 0x2e, 0x6c, 0x45, 0x7a, 0x6d, - 0x4b, 0x05, 0xae, 0xf3, 0xce, 0xf9, 0xae, 0xc1, 0x16, 0x17, 0x14, 0x29, 0x55, 0x43, 0xa5, 0x79, - 0x33, 0x81, 0xd6, 0x53, 0x10, 0xc4, 0x00, 0x8d, 0xa8, 0x7d, 0xea, 0x63, 0x1f, 0xab, 0xb9, 0x8b, - 0x6a, 0x7d, 0x1c, 0x90, 0x4f, 0xd6, 0xca, 0xd1, 0x7a, 0x0a, 0x12, 0x6c, 0x16, 0x3f, 0xb3, 0x5b, - 0x2c, 0x2e, 0x42, 0x86, 0x6d, 0x79, 0xea, 0xca, 0xb9, 0x6a, 0xf7, 0x05, 0x47, 0xe4, 0x31, 0x2f, - 0xa1, 0x16, 0x27, 0xd0, 0x7a, 0x0a, 0x82, 0xbe, 0x50, 0xa0, 0x48, 0x28, 0x31, 0x4c, 0xb7, 0x63, - 0x5a, 0xa6, 0x11, 0x6f, 0x3c, 0x3e, 0x01, 0x57, 0xb8, 0x09, 0xdf, 0x99, 0x60, 0x42, 0x9d, 0x92, - 0x5d, 0xce, 0x3b, 0x74, 0xc1, 0x30, 0xdb, 0x85, 0x35, 0x6f, 0x86, 0xd6, 0xdc, 0x20, 0x93, 0x29, - 0xf5, 0xf3, 0x90, 0x68, 0x17, 0xae, 0xf8, 0x24, 0xd4, 0x1e, 0x64, 0xa8, 0xba, 0xba, 0xa5, 0x6c, - 0x67, 0xaa, 0x37, 0x06, 0x7d, 0xed, 0xfa, 0x08, 0x42, 0x3a, 0xd1, 0xa3, 0x1c, 0xe8, 0x13, 0x05, - 0xae, 0x47, 0x3b, 0x32, 0x7c, 0xcf, 0x6c, 0xe3, 0x38, 0xb2, 0x79, 0xbe, 0xbf, 0x6f, 0x4d, 0xd8, - 0x5f, 0x64, 0xc6, 0x49, 0xc0, 0x34, 0x12, 0xdd, 0xd2, 0xa0, 0xaf, 0x15, 0xdd, 0x31, 0x68, 0xc9, - 0x8c, 0xab, 0xe3, 0xf0, 0xc1, 0x9d, 0xe3, 0xe2, 0x2e, 0x75, 0x99, 0x4d, 0xda, 0x46, 0x5c, 0x56, - 0xd7, 0x78, 0x69, 0xe7, 0x77, 0xce, 0x10, 0x5d, 0x4f, 0xd7, 0xd0, 0xb5, 0x14, 0xb2, 0x60, 0x42, - 0x4e, 0x2a, 0x72, 0xe8, 0x4d, 0x98, 0x3b, 0xc3, 0xbd, 0xf0, 0xea, 0x59, 0x1b, 
0xf4, 0xb5, 0x2b, - 0x67, 0xb8, 0x27, 0x49, 0x08, 0xb0, 0xe8, 0x6d, 0x58, 0x78, 0x66, 0x3a, 0x3e, 0x0e, 0x9b, 0x04, - 0x7e, 0xc7, 0x73, 0x80, 0x7c, 0xc7, 0x73, 0xc0, 0xbd, 0xd9, 0xbb, 0x4a, 0xe1, 0x8f, 0x0a, 0xfc, - 0xdf, 0x85, 0xca, 0x8e, 0xac, 0x7d, 0x61, 0xa2, 0xf6, 0x9a, 0xac, 0x7d, 0x7a, 0x7d, 0x9d, 0x66, - 0xdd, 0xaf, 0x14, 0xb8, 0x3a, 0xae, 0xda, 0x5c, 0xcc, 0x15, 0x0f, 0x64, 0x63, 0x56, 0x76, 0x6e, - 0xa5, 0x8d, 0x11, 0x42, 0x85, 0x86, 0x69, 0xb6, 0x7c, 0xa2, 0xc0, 0xc6, 0xd8, 0x2a, 0x74, 0x31, - 0x63, 0xfe, 0xcb, 0x9e, 0x49, 0x58, 0x13, 0xe7, 0xef, 0xa5, 0x58, 0x73, 0x06, 0x1b, 0x63, 0x6b, - 0xd6, 0xd7, 0x48, 0xd9, 0xcc, 0x54, 0x65, 0xbf, 0x57, 0x60, 0x6b, 0x5a, 0x79, 0xba, 0x94, 0x6c, - 0xfd, 0xb5, 0x02, 0x9b, 0x13, 0xeb, 0xca, 0x65, 0xc4, 0xa5, 0xf4, 0xa7, 0x79, 0xc8, 0x44, 0xd5, - 0x24, 0x68, 0x5c, 0x6b, 0xa2, 0x71, 0x9d, 0x17, 0x8d, 0x6b, 0x6d, 0xa4, 0x71, 0xad, 0x59, 0x52, - 0xf3, 0x36, 0xfb, 0x75, 0x9b, 0xb7, 0xe3, 0x61, 0xf3, 0x26, 0x66, 0x8f, 0xb7, 0x26, 0x77, 0x93, - 0xaf, 0xd0, 0xc0, 0xfd, 0x42, 0x01, 0xe4, 0x13, 0x0f, 0xb3, 0x1a, 0xb1, 0xf0, 0x47, 0xd8, 0x12, - 0x9c, 0xea, 0x3c, 0x57, 0xb1, 0x73, 0x8e, 0x8a, 0x93, 0x14, 0x93, 0x50, 0xb7, 0x35, 0xe8, 0x6b, - 0x37, 0xd3, 0x12, 0x25, 0xd5, 0x63, 0xf4, 0xfd, 0x2f, 0xea, 0x71, 0x07, 0xae, 0x4f, 0xb0, 0xf9, - 0x75, 0xa8, 0x2b, 0x3d, 0x5f, 0x84, 0x4d, 0x9e, 0xa3, 0xf7, 0x1d, 0xdf, 0x63, 0xd8, 0x1d, 0x49, - 0x5f, 0x54, 0x83, 0xa5, 0x96, 0x8b, 0x83, 0xd3, 0xc5, 0xb5, 0x9e, 0x3f, 0x6a, 0xac, 0x87, 0x19, - 0x11, 0xb1, 0xf0, 0x49, 0x23, 0x5a, 0x04, 0x76, 0x89, 0x6b, 0x59, 0xb2, 0xeb, 0x69, 0xe2, 0x56, - 0x15, 0x14, 0xe8, 0x2e, 0x40, 0x34, 0xee, 0xd4, 0x2c, 0x3e, 0x94, 0x64, 0xab, 0xea, 0xa0, 0xaf, - 0x5d, 0x8d, 0xa1, 0x12, 0x93, 0x44, 0x8b, 0x7e, 0xa7, 0x04, 0x37, 0x70, 0x58, 0x07, 0xe2, 0xab, - 0x2c, 0xcc, 0x93, 0xbd, 0x74, 0x9e, 0x4c, 0xdc, 0xfa, 0xf0, 0x98, 0x49, 0x62, 0x44, 0xe6, 0xdc, - 0x0a, 0xb7, 0x39, 0x56, 0x91, 0xa2, 0x8f, 0x03, 0xa3, 0xbf, 0x2b, 0x70, 0x73, 0x0c, 0xfc, 0xbe, - 0x63, 0x7a, 0x5e, 0xdd, 0xe4, 0xb3, 0x6f, 0x60, 0xe0, 0xa3, 0x6f, 0x68, 0xe0, 0x50, 0x9e, 0xb0, - 0xf4, 0x76, 0x68, 0xe9, 0xb9, 0xaa, 0xf5, 0x73, 0xb1, 0x85, 0x4f, 0x15, 0x50, 0x27, 0xb9, 0xe2, - 0x52, 0x6a, 0xec, 0x1f, 0x14, 0x78, 0x63, 0xea, 0xd6, 0x2f, 0xa5, 0xd6, 0xfe, 0x63, 0x0e, 0x0a, - 0xe3, 0x22, 0xa5, 0xf3, 0xb6, 0x6e, 0xf8, 0x76, 0xa3, 0x4c, 0x79, 0xbb, 0x91, 0xce, 0xdc, 0xec, - 0x37, 0x3c, 0x73, 0x9f, 0x2a, 0x90, 0x97, 0xa2, 0xcb, 0x73, 0x29, 0x2c, 0xcb, 0xd5, 0xf4, 0x66, - 0x27, 0xdb, 0x2e, 0xe7, 0x9a, 0xd4, 0x28, 0x17, 0x07, 0x7d, 0xad, 0x90, 0x94, 0x2f, 0xed, 0x27, - 0xa5, 0xbb, 0xf0, 0xb9, 0x02, 0x1b, 0x63, 0x65, 0x5d, 0x2c, 0x60, 0x3f, 0x1a, 0x0d, 0xd8, 0x3b, - 0xaf, 0x70, 0x5c, 0xa6, 0x46, 0xef, 0x97, 0xb3, 0xb0, 0x2c, 0x87, 0x1b, 0x7d, 0x00, 0xd9, 0x78, - 0x56, 0x52, 0xb8, 0xd3, 0xde, 0x3d, 0x3f, 0x43, 0xca, 0x89, 0x09, 0x69, 0x2d, 0x0c, 0x4e, 0x2c, - 0x47, 0x8f, 0xff, 0x16, 0x7e, 0xab, 0xc0, 0xca, 0xe4, 0x9e, 0x65, 0xb2, 0x13, 0x7e, 0x32, 0xea, - 0x84, 0xb2, 0x74, 0x45, 0x0f, 0xdf, 0x29, 0xcb, 0xdd, 0xb3, 0x36, 0xbf, 0xb3, 0x23, 0x75, 0xe5, - 0xc7, 0xbe, 0x49, 0x98, 0xcd, 0x7a, 0x53, 0xfd, 0xf0, 0xd5, 0x02, 0xac, 0x1d, 0xd0, 0x66, 0x43, - 0x6c, 0xd4, 0x26, 0xed, 0x1a, 0x39, 0xa5, 0x68, 0x07, 0x32, 0x8e, 0x7d, 0x8a, 0x99, 0xdd, 0xc1, - 0xdc, 0xbc, 0x2b, 0xe2, 0x25, 0x28, 0x82, 0xc9, 0x2f, 0x41, 0x11, 0x0c, 0xdd, 0x83, 0x65, 0x93, - 0x19, 0x1d, 0xea, 0x31, 0x83, 0x92, 0x56, 0xd4, 0xdc, 0xf1, 0x42, 0x6e, 0xb2, 0x47, 0xd4, 0x63, - 0x87, 0xa4, 0x25, 0x73, 0x42, 0x0c, 0x45, 0xdf, 0x85, 0x5c, 0xd7, 0xc5, 0x01, 0xdc, 0x0e, 0x06, - 0xc3, 
0x39, 0xce, 0xba, 0x39, 0xe8, 0x6b, 0x1b, 0x12, 0x58, 0xe2, 0x95, 0xa9, 0xd1, 0x03, 0xc8, - 0xb7, 0x28, 0x69, 0xf9, 0xae, 0x8b, 0x49, 0xab, 0x67, 0x78, 0xe6, 0xa9, 0x78, 0xbc, 0xcc, 0x54, - 0x6f, 0x0d, 0xfa, 0xda, 0xa6, 0x84, 0x6b, 0x98, 0xa7, 0xb2, 0x94, 0xd5, 0x04, 0x2a, 0x18, 0xe8, - 0x86, 0xcf, 0x38, 0xad, 0xa0, 0xc2, 0x18, 0xfc, 0x5d, 0x6f, 0x31, 0x1e, 0xe8, 0xba, 0xc9, 0xfa, - 0x23, 0x0f, 0x74, 0x29, 0x24, 0x6a, 0x40, 0xce, 0xf3, 0x9b, 0x1d, 0x9b, 0x19, 0xdc, 0x95, 0x4b, - 0x53, 0x0f, 0x78, 0xf4, 0x00, 0x05, 0x82, 0x6d, 0xf8, 0xdc, 0x29, 0xad, 0x83, 0xe0, 0x44, 0x9a, - 0xd4, 0x4c, 0x1c, 0x9c, 0x08, 0x26, 0x07, 0x27, 0x82, 0xa1, 0x9f, 0xc3, 0xba, 0x48, 0x61, 0xc3, - 0xc5, 0x4f, 0x7d, 0xdb, 0xc5, 0x1d, 0x1c, 0xbf, 0xd9, 0xdd, 0x4e, 0xe7, 0xf9, 0x21, 0xff, 0xd5, - 0x25, 0x5a, 0xd1, 0x42, 0xd1, 0x14, 0x5c, 0x6e, 0xa1, 0xd2, 0x58, 0x54, 0x81, 0xa5, 0x67, 0xd8, - 0xf5, 0x6c, 0x4a, 0xd4, 0x2c, 0xb7, 0x75, 0x63, 0xd0, 0xd7, 0xd6, 0x42, 0x90, 0xc4, 0x1b, 0x51, - 0xa1, 0x1a, 0xac, 0xf1, 0xb6, 0xc0, 0x60, 0xcc, 0x31, 0x3c, 0xdc, 0xa2, 0xc4, 0xf2, 0x54, 0xd8, - 0x52, 0xb6, 0xe7, 0x44, 0x38, 0x39, 0xf2, 0x98, 0x39, 0x0d, 0x81, 0x92, 0xc3, 0x99, 0x40, 0xdd, - 0x9b, 0xff, 0xfc, 0x0b, 0x4d, 0x29, 0xfd, 0x46, 0x01, 0x94, 0xde, 0x0e, 0x72, 0x60, 0xb5, 0x4b, - 0x2d, 0x19, 0x14, 0xf6, 0x3c, 0x6f, 0xa4, 0xbd, 0x71, 0x34, 0x4a, 0x28, 0x0c, 0x49, 0x70, 0xc7, - 0x86, 0x3c, 0x98, 0xd1, 0x93, 0xa2, 0xab, 0x2b, 0xb0, 0x2c, 0x3b, 0xbe, 0xf4, 0xef, 0x45, 0x58, - 0x4d, 0x48, 0x45, 0x1e, 0x2c, 0x13, 0x6a, 0xe1, 0x06, 0x76, 0x70, 0x8b, 0x51, 0x37, 0x2c, 0x42, - 0xef, 0x4f, 0x35, 0x87, 0x77, 0xbf, 0x11, 0x97, 0x28, 0x45, 0x85, 0x41, 0x5f, 0xbb, 0x26, 0x0b, - 0x93, 0xdc, 0x34, 0xa2, 0x04, 0x1d, 0x41, 0xc6, 0x3c, 0x3d, 0xb5, 0x49, 0x90, 0x4c, 0xa2, 0xc2, - 0xdc, 0x1c, 0x37, 0x04, 0xec, 0x86, 0x34, 0x22, 0xd5, 0x22, 0x0e, 0x39, 0xd5, 0x22, 0x18, 0x3a, - 0x81, 0x1c, 0xa3, 0x0e, 0x76, 0x4d, 0x66, 0x53, 0x12, 0x8d, 0x05, 0xc5, 0xb1, 0x93, 0xc5, 0x90, - 0x6c, 0x78, 0xb1, 0xc9, 0xac, 0xba, 0xbc, 0x40, 0x14, 0x72, 0x26, 0x21, 0x94, 0x85, 0x62, 0x97, - 0x26, 0x8d, 0x02, 0x49, 0xe7, 0xec, 0xc6, 0x4c, 0xc2, 0x37, 0xbc, 0xac, 0x48, 0xa2, 0xe4, 0xb2, - 0x22, 0x81, 0x47, 0x8e, 0xd9, 0x3c, 0x6f, 0x79, 0xa6, 0x1f, 0xb3, 0x03, 0xc8, 0x47, 0x95, 0x89, - 0x92, 0x23, 0xea, 0xd8, 0xad, 0x1e, 0xff, 0xce, 0x91, 0x15, 0x97, 0x67, 0x12, 0x27, 0x5f, 0x9e, - 0x49, 0x1c, 0xfa, 0x18, 0x86, 0xaf, 0x4e, 0x23, 0x59, 0xba, 0xc8, 0xa3, 0xb4, 0x3d, 0xce, 0xa1, - 0xfa, 0x18, 0xfa, 0xea, 0xcd, 0xd0, 0xb5, 0x63, 0xa5, 0xe9, 0x63, 0xa1, 0x85, 0x36, 0xac, 0xa5, - 0x92, 0xea, 0xb5, 0x8c, 0x3f, 0xa7, 0x90, 0x4f, 0x06, 0xe8, 0x75, 0xe8, 0x39, 0x98, 0xcf, 0x64, - 0xf2, 0xd9, 0xd2, 0x9f, 0x15, 0xd8, 0x3c, 0xf2, 0x1d, 0xcf, 0x74, 0x1b, 0x51, 0xda, 0x1c, 0xd0, - 0xe6, 0x1e, 0x66, 0xa6, 0xed, 0x78, 0x81, 0x48, 0xfe, 0xc8, 0x13, 0x6a, 0xe6, 0x22, 0x39, 0x40, - 0x16, 0x29, 0xde, 0x96, 0xdf, 0x86, 0x85, 0xc7, 0xc9, 0xe9, 0x26, 0xd9, 0x0e, 0x09, 0x0a, 0x74, - 0x07, 0x16, 0x83, 0xfb, 0x15, 0xb3, 0x70, 0xb2, 0xe1, 0x83, 0xaf, 0x80, 0xc8, 0x83, 0xaf, 0x80, - 0xfc, 0xff, 0x21, 0xe4, 0xa4, 0x37, 0x2a, 0x94, 0x83, 0xa5, 0x93, 0xfa, 0x0f, 0xeb, 0x87, 0x3f, - 0xae, 0xe7, 0x67, 0x82, 0xc5, 0xd1, 0x7e, 0x7d, 0xaf, 0x56, 0xff, 0x41, 0x5e, 0x09, 0x16, 0xfa, - 0x49, 0xbd, 0x1e, 0x2c, 0x66, 0xd1, 0x15, 0xc8, 0x36, 0x4e, 0xee, 0xdf, 0xdf, 0xdf, 0xdf, 0xdb, - 0xdf, 0xcb, 0xcf, 0x21, 0x80, 0xc5, 0xef, 0xef, 0xd6, 0x1e, 0xee, 0xef, 0xe5, 0xe7, 0xab, 0x3f, - 0x7b, 0xfe, 0xa2, 0xa8, 0x7c, 0xf9, 0xa2, 0xa8, 0xfc, 0xeb, 0x45, 0x51, 0xf9, 0xec, 0x65, 0x71, - 0xe6, 0xcb, 0x97, 0xc5, 0x99, 
0x7f, 0xbe, 0x2c, 0xce, 0xfc, 0xf4, 0xbe, 0xf4, 0xe9, 0x52, 0x3c, - 0x1b, 0x77, 0x5d, 0x1a, 0x9c, 0xa1, 0x70, 0x55, 0xb9, 0xc0, 0x37, 0xda, 0xe6, 0x22, 0xbf, 0xc3, - 0xde, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x02, 0xb6, 0x98, 0xb3, 0xd1, 0x1d, 0x00, 0x00, + // 2129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x39, 0x4f, 0x6f, 0x1b, 0xc7, + 0xf5, 0x5a, 0x89, 0xa2, 0xc8, 0x47, 0x59, 0xa2, 0x46, 0x96, 0xbd, 0xa2, 0x6d, 0xae, 0xc2, 0xf8, + 0x17, 0x28, 0xbf, 0x38, 0x64, 0xa3, 0x14, 0xa8, 0xe1, 0xf6, 0x22, 0x5a, 0x6a, 0x4d, 0xd5, 0xa6, + 0xe4, 0x95, 0xd5, 0xa2, 0x05, 0x9a, 0xc5, 0x92, 0x3b, 0xa2, 0x37, 0x5a, 0xce, 0xd0, 0xbb, 0xb3, + 0x6e, 0x98, 0x73, 0x7b, 0x28, 0x02, 0xa4, 0x41, 0x91, 0xb6, 0x01, 0x0a, 0xb4, 0xc8, 0xad, 0xe7, + 0x1e, 0xda, 0x43, 0xbf, 0x80, 0x8f, 0x39, 0xf6, 0xc4, 0x16, 0xf6, 0x8d, 0xd7, 0x7e, 0x81, 0x62, + 0x67, 0x76, 0xb9, 0xc3, 0x5d, 0x52, 0x94, 0x93, 0xba, 0x3e, 0x91, 0xf3, 0xfe, 0xcf, 0x7b, 0x6f, + 0xde, 0xbc, 0x37, 0x0b, 0x77, 0x6c, 0xc2, 0xb0, 0x4b, 0x4c, 0xa7, 0xe6, 0xb5, 0x1f, 0x63, 0xcb, + 0x77, 0xb0, 0x1b, 0xff, 0xa3, 0xad, 0x0f, 0x71, 0x9b, 0x79, 0x29, 0x40, 0xb5, 0xe7, 0x52, 0x46, + 0x51, 0x31, 0x09, 0x2f, 0x69, 0x1d, 0x4a, 0x3b, 0x0e, 0xae, 0x71, 0x7c, 0xcb, 0x3f, 0xad, 0x31, + 0xbb, 0x8b, 0x3d, 0x66, 0x76, 0x7b, 0x82, 0xa5, 0x54, 0x39, 0xbb, 0xed, 0x55, 0x6d, 0x5a, 0x33, + 0x7b, 0x76, 0xad, 0x4d, 0x5d, 0x5c, 0x7b, 0xfa, 0x5e, 0xad, 0x83, 0x09, 0x76, 0x4d, 0x86, 0xad, + 0x90, 0xe6, 0xdb, 0x31, 0x4d, 0xd7, 0x6c, 0x3f, 0xb6, 0x09, 0x76, 0xfb, 0xb5, 0xde, 0x59, 0x87, + 0x33, 0xb9, 0xd8, 0xa3, 0xbe, 0xdb, 0xc6, 0x29, 0xae, 0x77, 0x3b, 0x36, 0x7b, 0xec, 0xb7, 0xaa, + 0x6d, 0xda, 0xad, 0x75, 0x68, 0x87, 0xc6, 0x36, 0x04, 0x2b, 0xbe, 0xe0, 0xff, 0x04, 0x79, 0xe5, + 0xcf, 0x0b, 0x90, 0xdb, 0xff, 0x08, 0xb7, 0x7d, 0x46, 0x5d, 0xb4, 0x05, 0xf3, 0xb6, 0xa5, 0x2a, + 0x5b, 0xca, 0x76, 0xbe, 0x5e, 0x1c, 0x0e, 0xb4, 0x65, 0xdb, 0xba, 0x45, 0xbb, 0x36, 0xc3, 0xdd, + 0x1e, 0xeb, 0xeb, 0xf3, 0xb6, 0x85, 0xde, 0x82, 0x4c, 0x8f, 0x52, 0x47, 0x9d, 0xe7, 0x34, 0x68, + 0x38, 0xd0, 0x56, 0x82, 0xb5, 0x44, 0xc5, 0xf1, 0x68, 0x17, 0x16, 0x09, 0xb5, 0xb0, 0xa7, 0x2e, + 0x6c, 0x2d, 0x6c, 0x17, 0x76, 0xae, 0x54, 0x53, 0xae, 0x6b, 0x52, 0x0b, 0xd7, 0xd7, 0x87, 0x03, + 0x6d, 0x95, 0x13, 0x4a, 0x12, 0x04, 0x27, 0xfa, 0x00, 0x56, 0xba, 0x36, 0xb1, 0xbb, 0x7e, 0xf7, + 0x80, 0xb6, 0x8e, 0xed, 0x8f, 0xb1, 0x9a, 0xd9, 0x52, 0xb6, 0x0b, 0x3b, 0xe5, 0xb4, 0x2c, 0x3d, + 0x74, 0xc6, 0x7d, 0xdb, 0x63, 0xf5, 0x2b, 0xcf, 0x06, 0xda, 0x5c, 0x60, 0xd8, 0x38, 0xb7, 0x9e, + 0x58, 0x07, 0xf2, 0x1d, 0xd3, 0x63, 0x27, 0x3d, 0xcb, 0x64, 0xf8, 0x91, 0xdd, 0xc5, 0xea, 0x22, + 0x97, 0x5f, 0xaa, 0x8a, 0xe0, 0x55, 0x23, 0xc7, 0x55, 0x1f, 0x45, 0xc1, 0xab, 0x97, 0x22, 0xd9, + 0xe3, 0x9c, 0x9f, 0xfd, 0x53, 0x53, 0xf4, 0x04, 0x0c, 0x1d, 0xc2, 0xba, 0x4f, 0x4c, 0xcf, 0xb3, + 0x3b, 0x04, 0x5b, 0xc6, 0x87, 0xb4, 0x65, 0xb8, 0x3e, 0xf1, 0xd4, 0xfc, 0xd6, 0xc2, 0x76, 0xbe, + 0xae, 0x0d, 0x07, 0xda, 0xb5, 0x18, 0x7d, 0x40, 0x5b, 0xba, 0x4f, 0x64, 0x27, 0xac, 0xa5, 0x90, + 0x95, 0xbf, 0x6c, 0x40, 0x26, 0xf0, 0xda, 0xc5, 0xc2, 0x44, 0xcc, 0x2e, 0x56, 0x97, 0xe3, 0x30, + 0x05, 0x6b, 0x39, 0x4c, 0xc1, 0x1a, 0xed, 0x40, 0x0e, 0x87, 0xc1, 0x57, 0xd7, 0x39, 0xed, 0x95, + 0xe1, 0x40, 0x43, 0x11, 0x4c, 0xa2, 0x1f, 0xd1, 0xa1, 0x07, 0x90, 0x0f, 0x76, 0x6a, 0x78, 0x18, + 0x13, 0x9e, 0x07, 0xe7, 0xbb, 0xec, 0x72, 0xe8, 0xb2, 0x5c, 0xc0, 0x74, 0x8c, 0x31, 0xe1, 0xce, + 0x1a, 0xad, 0xd0, 0x21, 0xe4, 0x83, 0x78, 0x1b, 0xac, 0xdf, 0xc3, 0xea, 0x42, 0x28, 0x6e, 0x62, + 0xb6, 
0x3c, 0xea, 0xf7, 0xb0, 0xb0, 0x8f, 0x84, 0x2b, 0xd9, 0xbe, 0x08, 0x86, 0x76, 0x21, 0xcb, + 0x4c, 0x9b, 0x30, 0x4f, 0x5d, 0xe4, 0xb9, 0xb7, 0x59, 0x15, 0xe7, 0xa8, 0x6a, 0xf6, 0xec, 0x6a, + 0x70, 0xd6, 0xaa, 0x4f, 0xdf, 0xab, 0x3e, 0x0a, 0x28, 0xea, 0x2b, 0xa1, 0x6d, 0x21, 0x83, 0x1e, + 0xfe, 0xa2, 0x23, 0xc8, 0x3a, 0x66, 0x0b, 0x3b, 0x9e, 0x9a, 0xe5, 0x22, 0x2a, 0x93, 0x0d, 0xaa, + 0xde, 0xe7, 0x44, 0xfb, 0x84, 0xb9, 0xfd, 0xfa, 0xe5, 0xe1, 0x40, 0x2b, 0x0a, 0x2e, 0xc9, 0xac, + 0x50, 0x0e, 0x32, 0x60, 0x95, 0x51, 0x66, 0x3a, 0x46, 0x74, 0x6e, 0x3d, 0x75, 0xe9, 0xe5, 0xb2, + 0x99, 0xb3, 0x47, 0x28, 0x4f, 0x4f, 0xac, 0xd1, 0x5f, 0x15, 0xb8, 0x69, 0x3a, 0x0e, 0x6d, 0x9b, + 0xcc, 0x6c, 0x39, 0xd8, 0x68, 0xf5, 0x8d, 0x9e, 0x6b, 0x53, 0xd7, 0x66, 0x7d, 0xc3, 0x24, 0xd6, + 0x48, 0xaf, 0x9a, 0xe3, 0x3b, 0xfa, 0xde, 0x94, 0x1d, 0xed, 0xc6, 0x22, 0xea, 0xfd, 0xa3, 0x50, + 0xc0, 0x2e, 0xb1, 0x22, 0x45, 0x62, 0xaf, 0xdb, 0xa1, 0x51, 0x5b, 0xe6, 0x0c, 0x72, 0x7d, 0x26, + 0x05, 0x72, 0x61, 0xdd, 0x63, 0x26, 0xe3, 0x16, 0x87, 0x87, 0xc4, 0xb0, 0x2d, 0x7e, 0x4c, 0x0a, + 0x3b, 0xef, 0x4c, 0x31, 0xf3, 0x38, 0xe0, 0xa8, 0xf7, 0xc5, 0xc9, 0x68, 0x58, 0xc2, 0xaa, 0xab, + 0xa1, 0x55, 0xab, 0xde, 0x38, 0x56, 0x4f, 0x02, 0x90, 0x0f, 0xeb, 0xa1, 0x5d, 0xd8, 0x8a, 0xf4, + 0xda, 0x96, 0x0a, 0x5c, 0xe7, 0xad, 0xf3, 0x5d, 0x83, 0x2d, 0x2e, 0x28, 0x52, 0xaa, 0x86, 0x4a, + 0x8b, 0x66, 0x02, 0xad, 0xa7, 0x20, 0x88, 0x01, 0x1a, 0x53, 0xfb, 0xc4, 0xc7, 0x3e, 0x56, 0x0b, + 0x17, 0xd5, 0xfa, 0x30, 0x20, 0x9f, 0xae, 0x95, 0xa3, 0xf5, 0x14, 0x24, 0xd8, 0x2c, 0x7e, 0x6a, + 0xb7, 0x59, 0x5c, 0x84, 0x0c, 0xdb, 0xf2, 0xd4, 0x95, 0x73, 0xd5, 0xee, 0x0b, 0x8e, 0xc8, 0x63, + 0x5e, 0x42, 0x2d, 0x4e, 0xa0, 0xf5, 0x14, 0x04, 0x7d, 0xa9, 0x40, 0x99, 0x50, 0x62, 0x98, 0x6e, + 0xd7, 0xb4, 0x4c, 0x23, 0xde, 0x78, 0x7c, 0x02, 0x2e, 0x71, 0x13, 0xbe, 0x33, 0xc5, 0x84, 0x26, + 0x25, 0xbb, 0x9c, 0x77, 0xe4, 0x82, 0x51, 0xb6, 0x0b, 0x6b, 0xde, 0x0c, 0xad, 0xb9, 0x46, 0xa6, + 0x53, 0xea, 0xe7, 0x21, 0xd1, 0x2e, 0x5c, 0xf2, 0x49, 0xa8, 0x3d, 0xc8, 0x50, 0x75, 0x75, 0x4b, + 0xd9, 0xce, 0xd5, 0xaf, 0x0d, 0x07, 0xda, 0xd5, 0x31, 0x84, 0x74, 0xa2, 0xc7, 0x39, 0xd0, 0x27, + 0x0a, 0x5c, 0x8d, 0x76, 0x64, 0xf8, 0x9e, 0xd9, 0xc1, 0x71, 0x64, 0x8b, 0x7c, 0x7f, 0xdf, 0x9a, + 0xb2, 0xbf, 0xc8, 0x8c, 0x93, 0x80, 0x69, 0x2c, 0xba, 0x95, 0xe1, 0x40, 0x2b, 0xbb, 0x13, 0xd0, + 0x92, 0x19, 0x97, 0x27, 0xe1, 0x83, 0x3b, 0xc7, 0xc5, 0x3d, 0xea, 0x32, 0x9b, 0x74, 0x8c, 0xb8, + 0xac, 0xae, 0xf1, 0xd2, 0xce, 0xef, 0x9c, 0x11, 0xba, 0x99, 0xae, 0xa1, 0x6b, 0x29, 0x64, 0xc9, + 0x84, 0x82, 0x54, 0xe4, 0xd0, 0x9b, 0xb0, 0x70, 0x86, 0xfb, 0xe1, 0xd5, 0xb3, 0x36, 0x1c, 0x68, + 0x97, 0xce, 0x70, 0x5f, 0x92, 0x10, 0x60, 0xd1, 0xdb, 0xb0, 0xf8, 0xd4, 0x74, 0x7c, 0x1c, 0x36, + 0x09, 0xfc, 0x8e, 0xe7, 0x00, 0xf9, 0x8e, 0xe7, 0x80, 0x3b, 0xf3, 0xb7, 0x95, 0xd2, 0x1f, 0x14, + 0xf8, 0xbf, 0x0b, 0x95, 0x1d, 0x59, 0xfb, 0xe2, 0x54, 0xed, 0x0d, 0x59, 0xfb, 0xec, 0xfa, 0x3a, + 0xcb, 0xba, 0x5f, 0x29, 0x70, 0x79, 0x52, 0xb5, 0xb9, 0x98, 0x2b, 0xee, 0xc9, 0xc6, 0xac, 0xec, + 0xdc, 0x48, 0x1b, 0x23, 0x84, 0x0a, 0x0d, 0xb3, 0x6c, 0xf9, 0x44, 0x81, 0x8d, 0x89, 0x55, 0xe8, + 0x62, 0xc6, 0xfc, 0x97, 0x3d, 0x93, 0xb0, 0x26, 0xce, 0xdf, 0xd7, 0x62, 0xcd, 0x19, 0x6c, 0x4c, + 0xac, 0x59, 0x5f, 0x23, 0x65, 0x73, 0x33, 0x95, 0xfd, 0x4e, 0x81, 0xad, 0x59, 0xe5, 0xe9, 0xb5, + 0x64, 0xeb, 0xaf, 0x15, 0xd8, 0x9c, 0x5a, 0x57, 0x5e, 0x47, 0x5c, 0x2a, 0x7f, 0xcc, 0x40, 0x2e, + 0xaa, 0x26, 0x41, 0xe3, 0xda, 0x10, 0x8d, 0x6b, 0x46, 0x34, 0xae, 0x8d, 0xb1, 0xc6, 0xb5, 0x61, + 0x49, 0xcd, 0xdb, 0xfc, 0xd7, 
0x6d, 0xde, 0x1e, 0x8d, 0x9a, 0x37, 0x31, 0x7b, 0xbc, 0x35, 0xbd, + 0x9b, 0x7c, 0x89, 0x06, 0xee, 0x17, 0x0a, 0x20, 0x9f, 0x78, 0x98, 0x35, 0x88, 0x85, 0x3f, 0xc2, + 0x96, 0xe0, 0x54, 0x33, 0x5c, 0xc5, 0xce, 0x39, 0x2a, 0x4e, 0x52, 0x4c, 0x42, 0xdd, 0xd6, 0x70, + 0xa0, 0x5d, 0x4f, 0x4b, 0x94, 0x54, 0x4f, 0xd0, 0xf7, 0xbf, 0xa8, 0xc7, 0x5d, 0xb8, 0x3a, 0xc5, + 0xe6, 0x57, 0xa1, 0xae, 0xf2, 0x2c, 0x0b, 0x9b, 0x3c, 0x47, 0xef, 0x3a, 0xbe, 0xc7, 0xb0, 0x3b, + 0x96, 0xbe, 0xa8, 0x01, 0x4b, 0x6d, 0x17, 0x07, 0xa7, 0x8b, 0x6b, 0x3d, 0x7f, 0xd4, 0x58, 0x0f, + 0x33, 0x22, 0x62, 0xe1, 0x93, 0x46, 0xb4, 0x08, 0xec, 0x12, 0xd7, 0xb2, 0x64, 0xd7, 0x93, 0xc4, + 0xad, 0x2a, 0x28, 0xd0, 0x6d, 0x80, 0x68, 0xdc, 0x69, 0x58, 0x7c, 0x28, 0xc9, 0xd7, 0xd5, 0xe1, + 0x40, 0xbb, 0x1c, 0x43, 0x25, 0x26, 0x89, 0x16, 0xfd, 0x56, 0x09, 0x6e, 0xe0, 0xb0, 0x0e, 0xc4, + 0x57, 0x59, 0x98, 0x27, 0x7b, 0xe9, 0x3c, 0x99, 0xba, 0xf5, 0xd1, 0x31, 0x93, 0xc4, 0x88, 0xcc, + 0xb9, 0x11, 0x6e, 0x73, 0xa2, 0x22, 0x45, 0x9f, 0x04, 0x46, 0x7f, 0x53, 0xe0, 0xfa, 0x04, 0xf8, + 0x5d, 0xc7, 0xf4, 0xbc, 0xa6, 0xc9, 0x67, 0xdf, 0xc0, 0xc0, 0x07, 0xdf, 0xd0, 0xc0, 0x91, 0x3c, + 0x61, 0xe9, 0xcd, 0xd0, 0xd2, 0x73, 0x55, 0xeb, 0xe7, 0x62, 0x4b, 0x9f, 0x2a, 0xa0, 0x4e, 0x73, + 0xc5, 0x6b, 0xa9, 0xb1, 0xbf, 0x57, 0xe0, 0x8d, 0x99, 0x5b, 0x7f, 0x2d, 0xb5, 0xf6, 0xef, 0x0b, + 0x50, 0x9a, 0x14, 0x29, 0x9d, 0xb7, 0x75, 0xa3, 0xb7, 0x1b, 0x65, 0xc6, 0xdb, 0x8d, 0x74, 0xe6, + 0xe6, 0xbf, 0xe1, 0x99, 0xfb, 0x54, 0x81, 0xa2, 0x14, 0x5d, 0x9e, 0x4b, 0x61, 0x59, 0xae, 0xa7, + 0x37, 0x3b, 0xdd, 0x76, 0x39, 0xd7, 0xa4, 0x46, 0xb9, 0x3c, 0x1c, 0x68, 0xa5, 0xa4, 0x7c, 0x69, + 0x3f, 0x29, 0xdd, 0xa5, 0x2f, 0x14, 0xd8, 0x98, 0x28, 0xeb, 0x62, 0x01, 0xfb, 0xd1, 0x78, 0xc0, + 0xde, 0x79, 0x89, 0xe3, 0x32, 0x33, 0x7a, 0xbf, 0x9c, 0x87, 0x65, 0x39, 0xdc, 0xe8, 0x03, 0xc8, + 0xc7, 0xb3, 0x92, 0xc2, 0x9d, 0xf6, 0xee, 0xf9, 0x19, 0x52, 0x4d, 0x4c, 0x48, 0x6b, 0x61, 0x70, + 0x62, 0x39, 0x7a, 0xfc, 0xb7, 0xf4, 0xb9, 0x02, 0x2b, 0xd3, 0x7b, 0x96, 0xe9, 0x4e, 0xf8, 0xc9, + 0xb8, 0x13, 0xaa, 0xd2, 0x15, 0x3d, 0x7a, 0xa7, 0xac, 0xf6, 0xce, 0x3a, 0xfc, 0xce, 0x8e, 0xd4, + 0x55, 0x1f, 0xfa, 0x26, 0x61, 0x36, 0xeb, 0xcf, 0xf4, 0xc3, 0xe7, 0x8b, 0xb0, 0x76, 0x40, 0x5b, + 0xc7, 0x62, 0xa3, 0x36, 0xe9, 0x34, 0xc8, 0x29, 0x45, 0x3b, 0x90, 0x73, 0xec, 0x53, 0xcc, 0xec, + 0x2e, 0xe6, 0xe6, 0x5d, 0x12, 0x2f, 0x41, 0x11, 0x4c, 0x7e, 0x09, 0x8a, 0x60, 0xe8, 0x0e, 0x2c, + 0x9b, 0xcc, 0xe8, 0x52, 0x8f, 0x19, 0x94, 0xb4, 0xa3, 0xe6, 0x8e, 0x17, 0x72, 0x93, 0x3d, 0xa0, + 0x1e, 0x3b, 0x24, 0x6d, 0x99, 0x13, 0x62, 0x28, 0xfa, 0x2e, 0x14, 0x7a, 0x2e, 0x0e, 0xe0, 0x76, + 0x30, 0x18, 0x2e, 0x70, 0xd6, 0xcd, 0xe1, 0x40, 0xdb, 0x90, 0xc0, 0x12, 0xaf, 0x4c, 0x8d, 0xee, + 0x41, 0xb1, 0x4d, 0x49, 0xdb, 0x77, 0x5d, 0x4c, 0xda, 0x7d, 0xc3, 0x33, 0x4f, 0xc5, 0xe3, 0x65, + 0xae, 0x7e, 0x63, 0x38, 0xd0, 0x36, 0x25, 0xdc, 0xb1, 0x79, 0x2a, 0x4b, 0x59, 0x4d, 0xa0, 0x82, + 0x81, 0x6e, 0xf4, 0x8c, 0xd3, 0x0e, 0x2a, 0x8c, 0xc1, 0xdf, 0xf5, 0xb2, 0xf1, 0x40, 0xd7, 0x4b, + 0xd6, 0x1f, 0x79, 0xa0, 0x4b, 0x21, 0xd1, 0x31, 0x14, 0x3c, 0xbf, 0xd5, 0xb5, 0x99, 0xc1, 0x5d, + 0xb9, 0x34, 0xf3, 0x80, 0x47, 0x0f, 0x50, 0x20, 0xd8, 0x46, 0xcf, 0x9d, 0xd2, 0x3a, 0x08, 0x4e, + 0xa4, 0x49, 0xcd, 0xc5, 0xc1, 0x89, 0x60, 0x72, 0x70, 0x22, 0x18, 0xfa, 0x39, 0xac, 0x8b, 0x14, + 0x36, 0x5c, 0xfc, 0xc4, 0xb7, 0x5d, 0xdc, 0xc5, 0xf1, 0x9b, 0xdd, 0xcd, 0x74, 0x9e, 0x1f, 0xf2, + 0x5f, 0x5d, 0xa2, 0x15, 0x2d, 0x14, 0x4d, 0xc1, 0xe5, 0x16, 0x2a, 0x8d, 0x45, 0x35, 0x58, 0x7a, + 0x8a, 0x5d, 0xcf, 0xa6, 0x44, 0xcd, 0x73, 0x5b, 0x37, 
0x86, 0x03, 0x6d, 0x2d, 0x04, 0x49, 0xbc, + 0x11, 0xd5, 0x9d, 0xcc, 0x17, 0x5f, 0x6a, 0x4a, 0xe5, 0x37, 0x0a, 0xa0, 0xb4, 0x0d, 0xc8, 0x81, + 0xd5, 0x1e, 0xb5, 0x64, 0x50, 0xd8, 0xa8, 0xbc, 0x91, 0xde, 0xc2, 0xd1, 0x38, 0xa1, 0x48, 0x86, + 0x04, 0x77, 0x6c, 0xc0, 0xbd, 0x39, 0x3d, 0x29, 0xba, 0xbe, 0x02, 0xcb, 0xb2, 0xb7, 0x2a, 0xff, + 0xce, 0xc2, 0x6a, 0x42, 0x2a, 0xf2, 0x60, 0x39, 0x98, 0xfc, 0x8f, 0xb1, 0x83, 0xdb, 0x8c, 0xba, + 0x61, 0xe5, 0x78, 0x7f, 0xa6, 0x39, 0xbc, 0x65, 0x8d, 0xb8, 0x44, 0xfd, 0x28, 0x0d, 0x07, 0xda, + 0x15, 0x59, 0x98, 0xe4, 0x9e, 0x31, 0x25, 0xe8, 0x08, 0x72, 0xe6, 0xe9, 0xa9, 0x4d, 0x82, 0x0c, + 0x10, 0x65, 0xe1, 0xfa, 0xa4, 0xce, 0x7d, 0x37, 0xa4, 0x11, 0xf9, 0x11, 0x71, 0xc8, 0xf9, 0x11, + 0xc1, 0xd0, 0x09, 0x14, 0x18, 0x75, 0xb0, 0x6b, 0x32, 0x9b, 0x92, 0xa8, 0x97, 0x2f, 0x4f, 0x1c, + 0x07, 0x46, 0x64, 0xa3, 0xdb, 0x48, 0x66, 0xd5, 0xe5, 0x05, 0xa2, 0x50, 0x30, 0x09, 0xa1, 0x2c, + 0x14, 0xbb, 0x34, 0xad, 0x7f, 0x4f, 0x3a, 0x67, 0x37, 0x66, 0x12, 0xbe, 0xe1, 0xb5, 0x40, 0x12, + 0x25, 0xd7, 0x02, 0x09, 0x3c, 0x76, 0x36, 0x32, 0xbc, 0x4f, 0x99, 0x7d, 0x36, 0x0e, 0xa0, 0x18, + 0x95, 0x13, 0x4a, 0x8e, 0xa8, 0x63, 0xb7, 0xfb, 0xfc, 0xe3, 0x44, 0x5e, 0xdc, 0x78, 0x49, 0x9c, + 0x7c, 0xe3, 0x25, 0x71, 0xe8, 0x63, 0x18, 0x3d, 0x15, 0x8d, 0x65, 0x69, 0x96, 0x47, 0x69, 0x7b, + 0x92, 0x43, 0xf5, 0x09, 0xf4, 0xf5, 0xeb, 0xa1, 0x6b, 0x27, 0x4a, 0xd3, 0x27, 0x42, 0x4b, 0x1d, + 0x58, 0x4b, 0x25, 0xd5, 0x2b, 0x99, 0x59, 0x4e, 0xa1, 0x98, 0x0c, 0xd0, 0xab, 0xd0, 0x73, 0x90, + 0xc9, 0xe5, 0x8a, 0xf9, 0xca, 0x9f, 0x14, 0xd8, 0x3c, 0xf2, 0x1d, 0xcf, 0x74, 0x8f, 0xa3, 0xb4, + 0x39, 0xa0, 0xad, 0x3d, 0xcc, 0x4c, 0xdb, 0xf1, 0x02, 0x91, 0xfc, 0x65, 0x26, 0xd4, 0xcc, 0x45, + 0x72, 0x80, 0x2c, 0x52, 0x3c, 0x08, 0xbf, 0x0d, 0x8b, 0x0f, 0x93, 0x23, 0x49, 0xb2, 0x87, 0x11, + 0x14, 0xe8, 0x16, 0x64, 0x83, 0x4b, 0x11, 0xb3, 0x70, 0x1c, 0xe1, 0xd3, 0xaa, 0x80, 0xc8, 0xd3, + 0xaa, 0x80, 0xfc, 0xff, 0x21, 0x14, 0xa4, 0x87, 0x25, 0x54, 0x80, 0xa5, 0x93, 0xe6, 0x0f, 0x9b, + 0x87, 0x3f, 0x6e, 0x16, 0xe7, 0x82, 0xc5, 0xd1, 0x7e, 0x73, 0xaf, 0xd1, 0xfc, 0x41, 0x51, 0x09, + 0x16, 0xfa, 0x49, 0xb3, 0x19, 0x2c, 0xe6, 0xd1, 0x25, 0xc8, 0x1f, 0x9f, 0xdc, 0xbd, 0xbb, 0xbf, + 0xbf, 0xb7, 0xbf, 0x57, 0x5c, 0x40, 0x00, 0xd9, 0xef, 0xef, 0x36, 0xee, 0xef, 0xef, 0x15, 0x33, + 0xf5, 0x9f, 0x3d, 0x7b, 0x5e, 0x56, 0xbe, 0x7a, 0x5e, 0x56, 0xfe, 0xf5, 0xbc, 0xac, 0x7c, 0xf6, + 0xa2, 0x3c, 0xf7, 0xd5, 0x8b, 0xf2, 0xdc, 0x3f, 0x5e, 0x94, 0xe7, 0x7e, 0x7a, 0x57, 0xfa, 0xde, + 0x28, 0xde, 0x7a, 0x7b, 0x2e, 0x0d, 0xce, 0x50, 0xb8, 0xaa, 0x5d, 0xe0, 0xc3, 0x6a, 0x2b, 0xcb, + 0x2f, 0x9e, 0xf7, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x1f, 0xf9, 0x4f, 0x86, 0x1d, 0x00, + 0x00, } func (m *Executor) Marshal() (dAtA []byte, err error) { @@ -1812,11 +1802,6 @@ func (m *JobSchedulingInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.QueueTtlSeconds != 0 { - i = encodeVarintSchedulerobjects(dAtA, i, uint64(m.QueueTtlSeconds)) - i-- - dAtA[i] = 0x50 - } if m.Version != 0 { i = encodeVarintSchedulerobjects(dAtA, i, uint64(m.Version)) i-- @@ -2412,9 +2397,6 @@ func (m *JobSchedulingInfo) Size() (n int) { if m.Version != 0 { n += 1 + sovSchedulerobjects(uint64(m.Version)) } - if m.QueueTtlSeconds != 0 { - n += 1 + sovSchedulerobjects(uint64(m.QueueTtlSeconds)) - } return n } @@ -5481,25 +5463,6 @@ func (m *JobSchedulingInfo) Unmarshal(dAtA []byte) error { break } } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) - } - 
m.QueueTtlSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSchedulerobjects - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueueTtlSeconds |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipSchedulerobjects(dAtA[iNdEx:]) diff --git a/internal/scheduler/schedulerobjects/schedulerobjects.proto b/internal/scheduler/schedulerobjects/schedulerobjects.proto index d3b5aeab148..079d87ca371 100644 --- a/internal/scheduler/schedulerobjects/schedulerobjects.proto +++ b/internal/scheduler/schedulerobjects/schedulerobjects.proto @@ -131,8 +131,7 @@ message JobSchedulingInfo { // Kubernetes objects that make up this job and their respective scheduling requirements. repeated ObjectRequirements object_requirements = 5; uint32 version = 9; - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - int64 queue_ttl_seconds = 10; + // ordinal 10 was previously used for queue_ttl_seconds } // Message capturing the scheduling requirements of a particular Kubernetes object. diff --git a/internal/scheduler/scheduling_algo.go b/internal/scheduler/scheduling_algo.go index 89d86880659..308fe160c64 100644 --- a/internal/scheduler/scheduling_algo.go +++ b/internal/scheduler/scheduling_algo.go @@ -4,6 +4,7 @@ import ( "context" "math/rand" "sort" + "strings" "time" "github.com/benbjohnson/immutable" @@ -12,26 +13,26 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/exp/maps" "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/util" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/fairness" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" "github.com/armadaproject/armada/internal/scheduler/quarantine" + "github.com/armadaproject/armada/internal/scheduler/queue" "github.com/armadaproject/armada/internal/scheduler/reports" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - "github.com/armadaproject/armada/pkg/client/queue" + "github.com/armadaproject/armada/pkg/api" ) // SchedulingAlgo is the interface between the Pulsar-backed scheduler and the @@ -46,7 +47,7 @@ type SchedulingAlgo interface { type FairSchedulingAlgo struct { schedulingConfig configuration.SchedulingConfig executorRepository database.ExecutorRepository - queueRepository repository.QueueRepository + queueCache queue.QueueCache schedulingContextRepository *reports.SchedulingContextRepository // Global job scheduling rate-limiter. 
limiter *rate.Limiter @@ -64,20 +65,22 @@ type FairSchedulingAlgo struct { // Function that is called every time an executor is scheduled. Useful for testing. onExecutorScheduled func(executor *schedulerobjects.Executor) // rand and clock injected here for repeatable testing. - rand *rand.Rand - clock clock.Clock - stringInterner *stringinterner.StringInterner + rand *rand.Rand + clock clock.Clock + stringInterner *stringinterner.StringInterner + resourceListFactory *internaltypes.ResourceListFactory } func NewFairSchedulingAlgo( config configuration.SchedulingConfig, maxSchedulingDuration time.Duration, executorRepository database.ExecutorRepository, - queueRepository repository.QueueRepository, + queueCache queue.QueueCache, schedulingContextRepository *reports.SchedulingContextRepository, nodeQuarantiner *quarantine.NodeQuarantiner, queueQuarantiner *quarantine.QueueQuarantiner, stringInterner *stringinterner.StringInterner, + resourceListFactory *internaltypes.ResourceListFactory, ) (*FairSchedulingAlgo, error) { if _, ok := config.PriorityClasses[config.DefaultPriorityClassName]; !ok { return nil, errors.Errorf( @@ -88,7 +91,7 @@ func NewFairSchedulingAlgo( return &FairSchedulingAlgo{ schedulingConfig: config, executorRepository: executorRepository, - queueRepository: queueRepository, + queueCache: queueCache, schedulingContextRepository: schedulingContextRepository, limiter: rate.NewLimiter(rate.Limit(config.MaximumSchedulingRate), config.MaximumSchedulingBurst), limiterByQueue: make(map[string]*rate.Limiter), @@ -99,6 +102,7 @@ func NewFairSchedulingAlgo( rand: util.NewThreadsafeRand(time.Now().UnixNano()), clock: clock.RealClock{}, stringInterner: stringInterner, + resourceListFactory: resourceListFactory, }, nil } @@ -137,6 +141,8 @@ func (l *FairSchedulingAlgo) Schedule( l.executorGroupsToSchedule = maps.Keys(executorGroups) sortExecutorGroups(l.executorGroupsToSchedule, l.schedulingConfig.PoolSchedulePriority, l.schedulingConfig.DefaultPoolSchedulePriority) } + + ctx.Infof("Looping over executor groups %s", strings.Join(maps.Keys(executorGroups), " ")) for len(l.executorGroupsToSchedule) > 0 { select { case <-ctx.Done(): @@ -148,6 +154,7 @@ func (l *FairSchedulingAlgo) Schedule( executorGroupLabel := armadaslices.Pop(&l.executorGroupsToSchedule) executorGroup := executorGroups[executorGroupLabel] if len(executorGroup) == 0 { + ctx.Infof("Skipping executor group %s as it has no executors", executorGroupLabel) continue } for _, executor := range executorGroup { @@ -164,6 +171,8 @@ func (l *FairSchedulingAlgo) Schedule( "scheduling on executor group %s with capacity %s", executorGroupLabel, fsctx.totalCapacityByPool[pool].CompactString(), ) + + start := time.Now() schedulerResult, sctx, err := l.scheduleOnExecutors( ctx, fsctx, @@ -171,6 +180,14 @@ func (l *FairSchedulingAlgo) Schedule( minimumJobSize, executorGroup, ) + + ctx.Infof( + "Scheduled on executor group %s in %v with error %v", + executorGroupLabel, + time.Now().Sub(start), + err, + ) + if err == context.DeadlineExceeded { // We've reached the scheduling time limit; // add the executorGroupLabel back to l.executorGroupsToSchedule such that we try it again next time, @@ -185,23 +202,19 @@ func (l *FairSchedulingAlgo) Schedule( l.schedulingContextRepository.StoreSchedulingContext(sctx) } - preemptedJobs := PreemptedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) - scheduledJobs := ScheduledJobsFromSchedulerResult[*jobdb.Job](schedulerResult) - failedJobs := FailedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) + 
preemptedJobs := PreemptedJobsFromSchedulerResult(schedulerResult) + scheduledJobs := ScheduledJobsFromSchedulerResult(schedulerResult) + if err := txn.Upsert(preemptedJobs); err != nil { return nil, err } if err := txn.Upsert(scheduledJobs); err != nil { return nil, err } - if err := txn.Upsert(failedJobs); err != nil { - return nil, err - } // Aggregate changes across executors. overallSchedulerResult.PreemptedJobs = append(overallSchedulerResult.PreemptedJobs, schedulerResult.PreemptedJobs...) overallSchedulerResult.ScheduledJobs = append(overallSchedulerResult.ScheduledJobs, schedulerResult.ScheduledJobs...) - overallSchedulerResult.FailedJobs = append(overallSchedulerResult.FailedJobs, schedulerResult.FailedJobs...) overallSchedulerResult.SchedulingContexts = append(overallSchedulerResult.SchedulingContexts, schedulerResult.SchedulingContexts...) maps.Copy(overallSchedulerResult.NodeIdByJobId, schedulerResult.NodeIdByJobId) maps.Copy(overallSchedulerResult.AdditionalAnnotationsByJobId, schedulerResult.AdditionalAnnotationsByJobId) @@ -229,7 +242,7 @@ type JobQueueIteratorAdapter struct { it *immutable.SortedSetIterator[*jobdb.Job] } -func (it *JobQueueIteratorAdapter) Next() (interfaces.LegacySchedulerJob, error) { +func (it *JobQueueIteratorAdapter) Next() (*jobdb.Job, error) { if it.it.Done() { return nil, nil } @@ -238,9 +251,9 @@ func (it *JobQueueIteratorAdapter) Next() (interfaces.LegacySchedulerJob, error) } type fairSchedulingAlgoContext struct { - queues []queue.Queue + queues []*api.Queue priorityFactorByQueue map[string]float64 - isActiveByQueueName map[string]bool + isActiveByPoolByQueue map[string]map[string]bool totalCapacityByPool schedulerobjects.QuantityByTAndResourceType[string] jobsByExecutorId map[string][]*jobdb.Job nodeIdByJobId map[string]string @@ -253,13 +266,20 @@ type fairSchedulingAlgoContext struct { func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Context, txn *jobdb.Txn) (*fairSchedulingAlgoContext, error) { executors, err := l.executorRepository.GetExecutors(ctx) + + executorToPool := make(map[string]string, len(executors)) + for _, executor := range executors { + executorToPool[executor.Id] = executor.Pool + } + allPools := maps.Values(executorToPool) + if err != nil { return nil, err } - executors = l.filterStaleExecutors(executors) + executors = l.filterStaleExecutors(ctx, executors) // TODO(albin): Skip queues with a high failure rate. - queues, err := l.queueRepository.GetAllQueues(ctx) + queues, err := l.queueCache.GetAll(ctx) if err != nil { return nil, err } @@ -277,13 +297,35 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con } // Create a map of jobs associated with each executor. - isActiveByQueueName := make(map[string]bool, len(queues)) + isActiveByPoolByQueue := make(map[string]map[string]bool, len(queues)) jobsByExecutorId := make(map[string][]*jobdb.Job) nodeIdByJobId := make(map[string]string) jobIdsByGangId := make(map[string]map[string]bool) gangIdByJobId := make(map[string]string) for _, job := range txn.GetAll() { - isActiveByQueueName[job.Queue()] = true + + // Mark a queue being active for a given pool. A queue is defined as being active if it has a job running + // on a pool or if a queued job is eligible for that pool + pools := job.Pools() + + if !job.Queued() && job.LatestRun() != nil { + pools = []string{executorToPool[job.LatestRun().Executor()]} + } else if len(pools) < 1 { + // This occurs if we haven't assigned a job to a pool. 
Right now this can occur if a user + // has upgraded from a version of armada where pools were not assigned statically. Eventually we + // Should be able to remove this + pools = allPools + } + + for _, pool := range pools { + isActiveByQueue, ok := isActiveByPoolByQueue[pool] + if !ok { + isActiveByQueue = make(map[string]bool, len(queues)) + } + isActiveByQueue[job.Queue()] = true + isActiveByPoolByQueue[pool] = isActiveByQueue + } + if job.Queued() { continue } @@ -329,7 +371,7 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con return &fairSchedulingAlgoContext{ queues: queues, priorityFactorByQueue: priorityFactorByQueue, - isActiveByQueueName: isActiveByQueueName, + isActiveByPoolByQueue: isActiveByPoolByQueue, totalCapacityByPool: totalCapacityByPool, jobsByExecutorId: jobsByExecutorId, nodeIdByJobId: nodeIdByJobId, @@ -351,12 +393,12 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( ) (*SchedulerResult, *schedulercontext.SchedulingContext, error) { nodeDb, err := nodedb.NewNodeDb( l.schedulingConfig.PriorityClasses, - l.schedulingConfig.MaxExtraNodesToConsider, l.schedulingConfig.IndexedResources, l.schedulingConfig.IndexedTaints, l.schedulingConfig.IndexedNodeLabels, l.schedulingConfig.WellKnownNodeTypes, l.stringInterner, + l.resourceListFactory, ) if err != nil { return nil, nil, err @@ -395,9 +437,14 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( totalResources, ) + activeByQueue, ok := fsctx.isActiveByPoolByQueue[pool] + if !ok { + activeByQueue = map[string]bool{} + } + now := time.Now() for queue, priorityFactor := range fsctx.priorityFactorByQueue { - if !fsctx.isActiveByQueueName[queue] { + if !activeByQueue[queue] { // To ensure fair share is computed only from active queues, i.e., queues with jobs queued or running. continue } @@ -441,8 +488,6 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( scheduler := NewPreemptingQueueScheduler( sctx, constraints, - l.schedulingConfig.NodeEvictionProbability, - l.schedulingConfig.NodeOversubscriptionEvictionProbability, l.schedulingConfig.ProtectedFractionOfFairShare, NewSchedulerJobRepositoryAdapter(fsctx.txn), nodeDb, @@ -462,7 +507,7 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( return nil, nil, err } for i, jctx := range result.PreemptedJobs { - jobDbJob := jctx.Job.(*jobdb.Job) + jobDbJob := jctx.Job if run := jobDbJob.LatestRun(); run != nil { jobDbJob = jobDbJob.WithUpdatedRun(run.WithFailed(true)) } else { @@ -471,8 +516,8 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( result.PreemptedJobs[i].Job = jobDbJob.WithQueued(false).WithFailed(true) } for i, jctx := range result.ScheduledJobs { - jobDbJob := jctx.Job.(*jobdb.Job) - jobId := jobDbJob.GetId() + jobDbJob := jctx.Job + jobId := jobDbJob.Id() nodeId := result.NodeIdByJobId[jobId] if nodeId == "" { return nil, nil, errors.Errorf("job %s not mapped to a node", jobId) @@ -490,15 +535,10 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( WithQueued(false). WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), priority) } - for i, jctx := range result.FailedJobs { - jobDbJob := jctx.Job.(*jobdb.Job) - result.FailedJobs[i].Job = jobDbJob.WithQueued(false).WithFailed(true) - } return result, sctx, nil } -// Adapter to make jobDb implement the JobRepository interface. -// +// SchedulerJobRepositoryAdapter allows jobDb implement the JobRepository interface. // TODO: Pass JobDb into the scheduler instead of using this shim to convert to a JobRepo. 
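The scheduling context above now tracks queue activity per pool rather than globally. The sketch below shows that data shape in isolation; `markActive` and the example pool names are made up for illustration, while the real population logic lives in `newFairSchedulingAlgoContext`.

```go
package main

import "fmt"

// markActive records that a queue has work relevant to each of the given pools,
// mirroring how isActiveByPoolByQueue is populated in the scheduling algo.
func markActive(active map[string]map[string]bool, queueName string, pools []string) {
	for _, pool := range pools {
		byQueue, ok := active[pool]
		if !ok {
			byQueue = make(map[string]bool)
			active[pool] = byQueue
		}
		byQueue[queueName] = true
	}
}

func main() {
	active := make(map[string]map[string]bool)

	// A running job contributes only to the pool of the executor it runs on;
	// a queued job contributes to every pool it is eligible for, falling back
	// to all pools when it has none assigned.
	markActive(active, "queue-a", []string{"cpu"})
	markActive(active, "queue-b", []string{"cpu", "gpu"})

	fmt.Println(active["cpu"]["queue-a"]) // true
	fmt.Println(active["gpu"]["queue-a"]) // false: queue-a is not active on the gpu pool
}
```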
type SchedulerJobRepositoryAdapter struct { txn *jobdb.Txn @@ -512,25 +552,25 @@ func NewSchedulerJobRepositoryAdapter(txn *jobdb.Txn) *SchedulerJobRepositoryAda // GetQueueJobIds is necessary to implement the JobRepository interface, which we need while transitioning from the old // to new scheduler. -func (repo *SchedulerJobRepositoryAdapter) GetQueueJobIds(queue string) ([]string, error) { +func (repo *SchedulerJobRepositoryAdapter) GetQueueJobIds(queue string) []string { rv := make([]string, 0) it := repo.txn.QueuedJobs(queue) for v, _ := it.Next(); v != nil; v, _ = it.Next() { rv = append(rv, v.Id()) } - return rv, nil + return rv } // GetExistingJobsByIds is necessary to implement the JobRepository interface which we need while transitioning from the // old to new scheduler. -func (repo *SchedulerJobRepositoryAdapter) GetExistingJobsByIds(ids []string) ([]interfaces.LegacySchedulerJob, error) { - rv := make([]interfaces.LegacySchedulerJob, 0, len(ids)) +func (repo *SchedulerJobRepositoryAdapter) GetExistingJobsByIds(ids []string) []*jobdb.Job { + rv := make([]*jobdb.Job, 0, len(ids)) for _, id := range ids { if job := repo.txn.GetById(id); job != nil { rv = append(rv, job) } } - return rv, nil + return rv } // addExecutorToNodeDb adds all the nodes and jobs associated with a particular executor to the nodeDb. @@ -576,14 +616,14 @@ func (l *FairSchedulingAlgo) addExecutorToNodeDb(nodeDb *nodedb.NodeDb, jobs []* // filterStaleExecutors returns all executors which have sent a lease request within the duration given by l.schedulingConfig.ExecutorTimeout. // This ensures that we don't continue to assign jobs to executors that are no longer active. -func (l *FairSchedulingAlgo) filterStaleExecutors(executors []*schedulerobjects.Executor) []*schedulerobjects.Executor { +func (l *FairSchedulingAlgo) filterStaleExecutors(ctx *armadacontext.Context, executors []*schedulerobjects.Executor) []*schedulerobjects.Executor { activeExecutors := make([]*schedulerobjects.Executor, 0, len(executors)) cutoff := l.clock.Now().Add(-l.schedulingConfig.ExecutorTimeout) for _, executor := range executors { if executor.LastUpdateTime.After(cutoff) { activeExecutors = append(activeExecutors, executor) } else { - logrus.Debugf("Ignoring executor %s because it hasn't heartbeated since %s", executor.Id, executor.LastUpdateTime) + ctx.Infof("Ignoring executor %s because it hasn't heartbeated since %s", executor.Id, executor.LastUpdateTime) } } return activeExecutors @@ -653,7 +693,7 @@ func (l *FairSchedulingAlgo) aggregateAllocationByPoolAndQueueAndPriorityClass( allocation = make(schedulerobjects.QuantityByTAndResourceType[string]) allocationByQueue[queue] = allocation } - allocation.AddV1ResourceList(job.GetPriorityClassName(), job.GetResourceRequirements().Requests) + allocation.AddV1ResourceList(job.PriorityClassName(), job.ResourceRequirements().Requests) } } return rv diff --git a/internal/scheduler/scheduling_algo_test.go b/internal/scheduler/scheduling_algo_test.go index 6e5b800b6b3..c0b2e07cd3d 100644 --- a/internal/scheduler/scheduling_algo_test.go +++ b/internal/scheduler/scheduling_algo_test.go @@ -6,24 +6,23 @@ import ( "testing" "time" - "github.com/armadaproject/armada/pkg/client/queue" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/slices" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" - "github.com/armadaproject/armada/internal/armada/configuration" 
"github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" + "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/internal/scheduler/jobdb" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" "github.com/armadaproject/armada/internal/scheduler/nodedb" "github.com/armadaproject/armada/internal/scheduler/reports" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" + "github.com/armadaproject/armada/pkg/api" ) func TestSchedule(t *testing.T) { @@ -35,7 +34,7 @@ func TestSchedule(t *testing.T) { schedulingConfig configuration.SchedulingConfig executors []*schedulerobjects.Executor - queues []queue.Queue + queues []*api.Queue queuedJobs []*jobdb.Job // Already scheduled jobs. Specifically, @@ -49,9 +48,6 @@ func TestSchedule(t *testing.T) { // Indices of queued jobs expected to be scheduled. expectedScheduledIndices []int - - // Count of jobs expected to fail - expectedFailedJobCount int }{ "scheduling": { schedulingConfig: testfixtures.TestSchedulingConfig(), @@ -59,7 +55,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), expectedScheduledIndices: []int{0, 1, 2, 3}, }, @@ -69,7 +65,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{ + queues: []*api.Queue{ { Name: "testQueueA", PriorityFactor: 100, @@ -91,7 +87,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.WithLastUpdateTimeExecutor(testfixtures.BaseTime.Add(-1*time.Hour), testfixtures.Test1Node32CoreExecutor("executor2")), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), expectedScheduledIndices: []int{0, 1}, }, @@ -101,7 +97,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N1Cpu4GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 48), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -119,7 +115,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N1Cpu4GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 48), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -137,7 +133,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: 
[]*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -160,7 +156,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -182,7 +178,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -200,12 +196,12 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, }, "no executors": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{}, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), }, "computation of allocated resources does not confuse priority class with per-queue priority": { @@ -216,7 +212,7 @@ func TestSchedule(t *testing.T) { testfixtures.TestSchedulingConfig(), ), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: []*jobdb.Job{ // Submit the next job with a per-queue priority number (i.e., 1) that is larger // than the per-queue priority of the already-running job (i.e., 0), but smaller @@ -237,7 +233,7 @@ func TestSchedule(t *testing.T) { "urgency-based preemption within a single queue": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{{Name: "A"}}, + queues: []*api.Queue{{Name: "A"}}, queuedJobs: testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass1, 2), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -257,7 +253,7 @@ func TestSchedule(t *testing.T) { "urgency-based preemption between queues": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{{Name: "A"}, {Name: "B"}}, + queues: []*api.Queue{{Name: "A"}, {Name: "B"}}, queuedJobs: testfixtures.N16Cpu128GiJobs("B", testfixtures.PriorityClass1, 2), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -277,7 +273,7 @@ func TestSchedule(t *testing.T) { "preemption to fair share": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{{Name: "A", 
PriorityFactor: 0.01}, {Name: "B", PriorityFactor: 0.01}}, + queues: []*api.Queue{{Name: "A", PriorityFactor: 0.01}, {Name: "B", PriorityFactor: 0.01}}, queuedJobs: testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 2), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -297,28 +293,17 @@ func TestSchedule(t *testing.T) { "gang scheduling successful": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}}, + queues: []*api.Queue{{Name: "A", PriorityFactor: 0.01}}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 2)), expectedScheduledIndices: []int{0, 1}, }, - "gang scheduling successful with some jobs failing to schedule above min cardinality": { - schedulingConfig: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}}, - queuedJobs: testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 2, - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 10), - ), - expectedScheduledIndices: []int{0, 1}, - expectedFailedJobCount: 8, - }, "not scheduling a gang that does not fit on any executor": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{ testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}}, + queues: []*api.Queue{{Name: "A", PriorityFactor: 0.01}}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("queue1", testfixtures.PriorityClass0, 3)), }, "urgency-based gang preemption": { @@ -326,7 +311,7 @@ func TestSchedule(t *testing.T) { executors: []*schedulerobjects.Executor{ testfixtures.Test1Node32CoreExecutor("executor1"), }, - queues: []queue.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}}, + queues: []*api.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}}, queuedJobs: testfixtures.N16Cpu128GiJobs("queue2", testfixtures.PriorityClass1, 1), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -346,7 +331,7 @@ func TestSchedule(t *testing.T) { "preemption to fair share evicting a gang": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []queue.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}}, + queues: []*api.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}}, queuedJobs: testfixtures.N16Cpu128GiJobs("queue2", testfixtures.PriorityClass0, 1), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -369,7 +354,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []queue.Queue{testfixtures.MakeTestQueue()}, + queues: []*api.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass0, 4)), expectedScheduledIndices: []int{0, 1, 2, 3}, }, @@ -381,19 +366,20 @@ func TestSchedule(t *testing.T) { ctrl := 
gomock.NewController(t) mockExecutorRepo := schedulermocks.NewMockExecutorRepository(ctrl) mockExecutorRepo.EXPECT().GetExecutors(ctx).Return(tc.executors, nil).AnyTimes() - mockQueueRepo := schedulermocks.NewMockQueueRepository(ctrl) - mockQueueRepo.EXPECT().GetAllQueues(ctx).Return(tc.queues, nil).AnyTimes() + mockQueueCache := schedulermocks.NewMockQueueCache(ctrl) + mockQueueCache.EXPECT().GetAll(ctx).Return(tc.queues, nil).AnyTimes() schedulingContextRepo := reports.NewSchedulingContextRepository() sch, err := NewFairSchedulingAlgo( tc.schedulingConfig, 0, mockExecutorRepo, - mockQueueRepo, + mockQueueCache, schedulingContextRepo, nil, nil, stringinterner.New(1024), + testfixtures.TestResourceListFactory, ) require.NoError(t, err) @@ -432,7 +418,7 @@ func TestSchedule(t *testing.T) { } // Setup jobDb. - jobDb := testfixtures.NewJobDb() + jobDb := testfixtures.NewJobDb(testfixtures.TestResourceListFactory) txn := jobDb.WriteTxn() err = txn.Upsert(jobsToUpsert) require.NoError(t, err) @@ -442,7 +428,7 @@ func TestSchedule(t *testing.T) { require.NoError(t, err) // Check that the expected preemptions took place. - preemptedJobs := PreemptedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) + preemptedJobs := PreemptedJobsFromSchedulerResult(schedulerResult) actualPreemptedJobsByExecutorIndexAndNodeIndex := make(map[int]map[int][]int) for _, job := range preemptedJobs { executorIndex := executorIndexByJobId[job.Id()] @@ -467,7 +453,7 @@ func TestSchedule(t *testing.T) { } // Check that jobs were scheduled as expected. - scheduledJobs := ScheduledJobsFromSchedulerResult[*jobdb.Job](schedulerResult) + scheduledJobs := ScheduledJobsFromSchedulerResult(schedulerResult) actualScheduledIndices := make([]int, 0) for _, job := range scheduledJobs { actualScheduledIndices = append(actualScheduledIndices, queueIndexByJobId[job.Id()]) @@ -478,14 +464,6 @@ func TestSchedule(t *testing.T) { } else { assert.Equal(t, tc.expectedScheduledIndices, actualScheduledIndices) } - // Sanity check: we've set `GangNumJobsScheduledAnnotation` for all scheduled jobs. - for _, job := range scheduledJobs { - assert.Contains(t, schedulerResult.AdditionalAnnotationsByJobId[job.Id()], configuration.GangNumJobsScheduledAnnotation) - } - - // Check that we failed the correct number of excess jobs when a gang schedules >= minimum cardinality - failedJobs := FailedJobsFromSchedulerResult[*jobdb.Job](schedulerResult) - assert.Equal(t, tc.expectedFailedJobCount, len(failedJobs)) // Check that preempted jobs are marked as such consistently. for _, job := range preemptedJobs { @@ -505,13 +483,6 @@ func TestSchedule(t *testing.T) { assert.NotEmpty(t, dbRun.NodeName()) } - // Check that failed jobs are marked as such consistently. - for _, job := range failedJobs { - dbJob := txn.GetById(job.Id()) - assert.True(t, dbJob.Failed()) - assert.False(t, dbJob.Queued()) - } - // Check that jobDb was updated correctly. // TODO: Check that there are no unexpected jobs in the jobDb. 
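The tests above now stub a QueueCache (a single GetAll call) in place of the old QueueRepository mock. A minimal sketch of that cache shape, using stand-in types instead of the real *api.Queue and armadacontext packages:

```go
package main

import (
	"context"
	"fmt"
)

// Illustrative stand-ins for the real types; the scheduler uses *api.Queue
// and its own context type, but the shape of the cache is the same.
type Queue struct {
	Name           string
	PriorityFactor float64
}

// QueueCache mirrors the dependency the scheduler now mocks in tests:
// one GetAll call returning the current set of queues.
type QueueCache interface {
	GetAll(ctx context.Context) ([]*Queue, error)
}

// staticQueueCache is a minimal fake, analogous to the gomock-based
// MockQueueCache above: it always returns a fixed slice of queues.
type staticQueueCache struct {
	queues []*Queue
}

func (c *staticQueueCache) GetAll(_ context.Context) ([]*Queue, error) {
	return c.queues, nil
}

func main() {
	var cache QueueCache = &staticQueueCache{
		queues: []*Queue{{Name: "A", PriorityFactor: 0.01}},
	}
	queues, err := cache.GetAll(context.Background())
	if err != nil {
		panic(err)
	}
	for _, q := range queues {
		fmt.Printf("queue %s (priority factor %.2f)\n", q.Name, q.PriorityFactor)
	}
}
```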
for _, job := range preemptedJobs { @@ -522,10 +493,6 @@ func TestSchedule(t *testing.T) { dbJob := txn.GetById(job.Id()) assert.True(t, job.Equal(dbJob), "expected %v but got %v", job, dbJob) } - for _, job := range failedJobs { - dbJob := txn.GetById(job.Id()) - assert.True(t, job.Equal(dbJob), "expected %v but got %v", job, dbJob) - } }) } } @@ -556,18 +523,19 @@ func BenchmarkNodeDbConstruction(b *testing.B) { nil, nil, stringInterner, + testfixtures.TestResourceListFactory, ) require.NoError(b, err) b.StartTimer() nodeDb, err := nodedb.NewNodeDb( schedulingConfig.PriorityClasses, - schedulingConfig.MaxExtraNodesToConsider, schedulingConfig.IndexedResources, schedulingConfig.IndexedTaints, schedulingConfig.IndexedNodeLabels, schedulingConfig.WellKnownNodeTypes, stringInterner, + testfixtures.TestResourceListFactory, ) require.NoError(b, err) err = algo.addExecutorToNodeDb(nodeDb, jobs, nodes) diff --git a/internal/scheduler/simulator/runner.go b/internal/scheduler/simulator/runner.go index d69884890fa..eb668d1f13a 100644 --- a/internal/scheduler/simulator/runner.go +++ b/internal/scheduler/simulator/runner.go @@ -9,8 +9,8 @@ import ( "github.com/pkg/errors" "github.com/spf13/viper" - "github.com/armadaproject/armada/internal/armada/configuration" commonconfig "github.com/armadaproject/armada/internal/common/config" + "github.com/armadaproject/armada/internal/scheduler/configuration" ) func SchedulingConfigsByFilePathFromPattern(pattern string) (map[string]configuration.SchedulingConfig, error) { diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go index 39b468e05ce..592d71240fc 100644 --- a/internal/scheduler/simulator/simulator.go +++ b/internal/scheduler/simulator/simulator.go @@ -15,7 +15,6 @@ import ( "golang.org/x/time/rate" v1 "k8s.io/api/core/v1" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" @@ -23,12 +22,14 @@ import ( "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler" + "github.com/armadaproject/armada/internal/scheduler/configuration" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/fairness" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" - schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduleringester" "github.com/armadaproject/armada/pkg/armadaevents" ) @@ -79,6 +80,14 @@ type Simulator struct { // If true, scheduler logs are omitted. // This since the logs are very verbose when scheduling large numbers of jobs. 
SuppressSchedulerLogs bool + // For making internaltypes.ResourceList + resourceListFactory *internaltypes.ResourceListFactory + // Skips schedule events when we're in a steady state + enableFastForward bool + // Limit the time simulated + hardTerminationMinutes int + // Determines how often we trigger schedule events + schedulerCyclePeriodSeconds int } type StateTransition struct { @@ -86,9 +95,13 @@ type StateTransition struct { EventSequence *armadaevents.EventSequence } -func NewSimulator(clusterSpec *ClusterSpec, workloadSpec *WorkloadSpec, schedulingConfig configuration.SchedulingConfig) (*Simulator, error) { +func NewSimulator(clusterSpec *ClusterSpec, workloadSpec *WorkloadSpec, schedulingConfig configuration.SchedulingConfig, enableFastForward bool, hardTerminationMinutes int, schedulerCyclePeriodSeconds int) (*Simulator, error) { // TODO: Move clone to caller? // Copy specs to avoid concurrent mutation. + resourceListFactory, err := internaltypes.MakeResourceListFactory(schedulingConfig.SupportedResourceTypes) + if err != nil { + return nil, errors.WithMessage(err, "Error with the .scheduling.supportedResourceTypes field in config") + } clusterSpec = proto.Clone(clusterSpec).(*ClusterSpec) workloadSpec = proto.Clone(workloadSpec).(*WorkloadSpec) initialiseClusterSpec(clusterSpec) @@ -103,10 +116,11 @@ func NewSimulator(clusterSpec *ClusterSpec, workloadSpec *WorkloadSpec, scheduli schedulingConfig.PriorityClasses, schedulingConfig.DefaultPriorityClassName, stringinterner.New(1024), + resourceListFactory, ) randomSeed := workloadSpec.RandomSeed if randomSeed == 0 { - // Seed the RNG using the local time if no explic random seed is provided. + // Seed the RNG using the local time if no explicit random seed is provided. randomSeed = time.Now().Unix() } s := &Simulator{ @@ -126,8 +140,12 @@ func NewSimulator(clusterSpec *ClusterSpec, workloadSpec *WorkloadSpec, scheduli rate.Limit(schedulingConfig.MaximumSchedulingRate), schedulingConfig.MaximumSchedulingBurst, ), - limiterByQueue: make(map[string]*rate.Limiter), - rand: rand.New(rand.NewSource(randomSeed)), + limiterByQueue: make(map[string]*rate.Limiter), + rand: rand.New(rand.NewSource(randomSeed)), + resourceListFactory: resourceListFactory, + enableFastForward: enableFastForward, + hardTerminationMinutes: hardTerminationMinutes, + schedulerCyclePeriodSeconds: schedulerCyclePeriodSeconds, } jobDb.SetClock(s) s.limiter.SetBurstAt(s.time, schedulingConfig.MaximumSchedulingBurst) @@ -157,6 +175,9 @@ func (s *Simulator) Run(ctx *armadacontext.Context) error { }() // Bootstrap the simulator by pushing an event that triggers a scheduler run. s.pushScheduleEvent(s.time) + + simTerminationTime := s.time.Add(time.Minute * time.Duration(s.hardTerminationMinutes)) + // Then run the scheduler until all jobs have completed. for s.eventLog.Len() > 0 { select { @@ -168,6 +189,10 @@ func (s *Simulator) Run(ctx *armadacontext.Context) error { return err } } + if s.time.After(simTerminationTime) { + ctx.Infof("Current simulated time (%s) exceeds runtime deadline (%s). 
Terminating", s.time, simTerminationTime) + return nil + } } return nil } @@ -181,7 +206,7 @@ func (s *Simulator) StateTransitions() <-chan StateTransition { } func validateClusterSpec(clusterSpec *ClusterSpec) error { - poolNames := util.Map(clusterSpec.Pools, func(pool *Pool) string { return pool.Name }) + poolNames := armadaslices.Map(clusterSpec.Pools, func(pool *Pool) string { return pool.Name }) if !slices.Equal(poolNames, armadaslices.Unique(poolNames)) { return errors.Errorf("duplicate pool name: %v", poolNames) } @@ -201,12 +226,12 @@ func validateClusterSpec(clusterSpec *ClusterSpec) error { } func validateWorkloadSpec(workloadSpec *WorkloadSpec) error { - queueNames := util.Map(workloadSpec.Queues, func(queue *Queue) string { return queue.Name }) + queueNames := armadaslices.Map(workloadSpec.Queues, func(queue *Queue) string { return queue.Name }) if !slices.Equal(queueNames, armadaslices.Unique(queueNames)) { return errors.Errorf("duplicate queue name: %v", queueNames) } - jobTemplateIdSlices := util.Map(workloadSpec.Queues, func(queue *Queue) []string { - return util.Map(queue.JobTemplates, func(template *JobTemplate) string { return template.Id }) + jobTemplateIdSlices := armadaslices.Map(workloadSpec.Queues, func(queue *Queue) []string { + return armadaslices.Map(queue.JobTemplates, func(template *JobTemplate) string { return template.Id }) }) jobTemplateIds := make([]string, 0) for _, singleQueueTemplateIds := range jobTemplateIdSlices { @@ -225,12 +250,12 @@ func (s *Simulator) setupClusters() error { for executorGroupIndex, executorGroup := range pool.ClusterGroups { nodeDb, err := nodedb.NewNodeDb( s.schedulingConfig.PriorityClasses, - s.schedulingConfig.MaxExtraNodesToConsider, s.schedulingConfig.IndexedResources, s.schedulingConfig.IndexedTaints, s.schedulingConfig.IndexedNodeLabels, s.schedulingConfig.WellKnownNodeTypes, stringinterner.New(1024), + s.resourceListFactory, ) if err != nil { return err @@ -254,7 +279,7 @@ func (s *Simulator) setupClusters() error { ), } txn := nodeDb.Txn(true) - if err := nodeDb.CreateAndInsertWithApiJobsWithTxn(txn, nil, node); err != nil { + if err := nodeDb.CreateAndInsertWithJobDbJobsWithTxn(txn, nil, node); err != nil { txn.Abort() return err } @@ -409,9 +434,9 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { // Schedule the next run of the scheduler, unless there are no more active jobTemplates. // TODO: Make timeout configurable. if len(s.activeJobTemplatesById) > 0 { - s.pushScheduleEvent(s.time.Add(10 * time.Second)) + s.pushScheduleEvent(s.time.Add(time.Duration(s.schedulerCyclePeriodSeconds) * time.Second)) } - if !s.shouldSchedule { + if !s.shouldSchedule && s.enableFastForward { return nil } @@ -474,8 +499,6 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { sch := scheduler.NewPreemptingQueueScheduler( sctx, constraints, - s.schedulingConfig.NodeEvictionProbability, - s.schedulingConfig.NodeOversubscriptionEvictionProbability, s.schedulingConfig.ProtectedFractionOfFairShare, scheduler.NewSchedulerJobRepositoryAdapter(txn), nodeDb, @@ -499,9 +522,8 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { // Update jobDb to reflect the decisions by the scheduler. // Sort jobs to ensure deterministic event ordering. 
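The check added above terminates the run once simulated time passes the hardTerminationMinutes budget. A self-contained sketch of the same pattern; the type and field names here are illustrative, not the simulator's own:

```go
package main

import (
	"fmt"
	"time"
)

// simulation advances a purely logical clock; nothing here sleeps.
type simulation struct {
	now                    time.Time
	hardTerminationMinutes int
	events                 []time.Time // pending event times, kept sorted
}

func (s *simulation) run() {
	deadline := s.now.Add(time.Duration(s.hardTerminationMinutes) * time.Minute)
	for len(s.events) > 0 {
		// Pop the next event and jump the simulated clock to it.
		s.now, s.events = s.events[0], s.events[1:]
		// Process the event here; omitted in this sketch.
		if s.now.After(deadline) {
			fmt.Printf("simulated time %s exceeds deadline %s; terminating\n", s.now, deadline)
			return
		}
	}
}

func main() {
	start := time.Date(2022, 3, 1, 0, 0, 0, 0, time.UTC)
	s := &simulation{
		now:                    start,
		hardTerminationMinutes: 60,
		events: []time.Time{
			start.Add(30 * time.Minute),
			start.Add(90 * time.Minute), // past the deadline; the run stops here
			start.Add(120 * time.Minute),
		},
	}
	s.run()
}
```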
- preemptedJobs := scheduler.PreemptedJobsFromSchedulerResult[*jobdb.Job](result) + preemptedJobs := scheduler.PreemptedJobsFromSchedulerResult(result) scheduledJobs := slices.Clone(result.ScheduledJobs) - failedJobs := scheduler.FailedJobsFromSchedulerResult[*jobdb.Job](result) lessJob := func(a, b *jobdb.Job) int { if a.Queue() < b.Queue() { return -1 @@ -517,9 +539,8 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { } slices.SortFunc(preemptedJobs, lessJob) slices.SortFunc(scheduledJobs, func(a, b *schedulercontext.JobSchedulingContext) int { - return lessJob(a.Job.(*jobdb.Job), b.Job.(*jobdb.Job)) + return lessJob(a.Job, b.Job) }) - slices.SortFunc(failedJobs, lessJob) for i, job := range preemptedJobs { if run := job.LatestRun(); run != nil { job = job.WithUpdatedRun(run.WithFailed(true)) @@ -529,10 +550,10 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { preemptedJobs[i] = job.WithQueued(false).WithFailed(true) } for i, jctx := range scheduledJobs { - job := jctx.Job.(*jobdb.Job) - nodeId := result.NodeIdByJobId[job.GetId()] + job := jctx.Job + nodeId := result.NodeIdByJobId[job.Id()] if nodeId == "" { - return errors.Errorf("job %s not mapped to a node", job.GetId()) + return errors.Errorf("job %s not mapped to a node", job.Id()) } if node, err := nodeDb.GetNode(nodeId); err != nil { return err @@ -544,19 +565,10 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { scheduledJobs[i].Job = job.WithQueued(false).WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), priority) } } - for i, job := range failedJobs { - if run := job.LatestRun(); run != nil { - job = job.WithUpdatedRun(run.WithFailed(true)) - } - failedJobs[i] = job.WithQueued(false).WithFailed(true) - } if err := txn.Upsert(preemptedJobs); err != nil { return err } - if err := txn.Upsert(util.Map(scheduledJobs, func(jctx *schedulercontext.JobSchedulingContext) *jobdb.Job { return jctx.Job.(*jobdb.Job) })); err != nil { - return err - } - if err := txn.Upsert(failedJobs); err != nil { + if err := txn.Upsert(armadaslices.Map(scheduledJobs, func(jctx *schedulercontext.JobSchedulingContext) *jobdb.Job { return jctx.Job })); err != nil { return err } @@ -573,10 +585,6 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { if err != nil { return err } - eventSequences, err = scheduler.AppendEventSequencesFromUnschedulableJobs(eventSequences, failedJobs, s.time) - if err != nil { - return err - } // Update event timestamps to be consistent with simulated time. t := s.time @@ -588,7 +596,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { // If nothing changed, we're in steady state and can safely skip scheduling until something external has changed. // Do this only if a non-zero amount of time has passed. 
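The lessJob comparator above orders jobs by queue and then by id so that event sequences are emitted deterministically regardless of map iteration order. The same three-way comparator with the standard library's slices.SortFunc, using an illustrative job type rather than jobdb.Job:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

type job struct {
	queue string
	id    string
}

// lessJob returns -1, 0 or 1, ordering first by queue and then by id,
// matching the contract slices.SortFunc expects of its comparator.
func lessJob(a, b job) int {
	if c := strings.Compare(a.queue, b.queue); c != 0 {
		return c
	}
	return strings.Compare(a.id, b.id)
}

func main() {
	jobs := []job{
		{queue: "B", id: "02"},
		{queue: "A", id: "09"},
		{queue: "A", id: "01"},
	}
	slices.SortFunc(jobs, lessJob)
	fmt.Println(jobs) // [{A 01} {A 09} {B 02}]
}
```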
- if !s.time.Equal(time.Time{}) && len(result.ScheduledJobs) == 0 && len(result.PreemptedJobs) == 0 && len(result.FailedJobs) == 0 { + if !s.time.Equal(time.Time{}) && len(result.ScheduledJobs) == 0 && len(result.PreemptedJobs) == 0 { s.shouldSchedule = false } } @@ -667,7 +675,7 @@ func (s *Simulator) handleSubmitJob(txn *jobdb.Txn, e *armadaevents.SubmitJob, t if err != nil { return nil, false, err } - job := s.jobDb.NewJob( + job, err := s.jobDb.NewJob( armadaevents.UlidFromProtoUuid(e.JobId).String(), eventSequence.JobSetName, eventSequence.Queue, @@ -679,7 +687,12 @@ func (s *Simulator) handleSubmitJob(txn *jobdb.Txn, e *armadaevents.SubmitJob, t false, false, s.logicalJobCreatedTimestamp.Add(1), + false, + []string{}, ) + if err != nil { + return nil, false, err + } if err := txn.Upsert([]*jobdb.Job{job}); err != nil { return nil, false, err } @@ -747,8 +760,8 @@ func (s *Simulator) handleJobSucceeded(txn *jobdb.Txn, e *armadaevents.JobSuccee run := job.LatestRun() pool := s.poolByNodeId[run.NodeId()] s.allocationByPoolAndQueueAndPriorityClass[pool][job.Queue()].SubV1ResourceList( - job.GetPriorityClassName(), - job.GetResourceRequirements().Requests, + job.PriorityClassName(), + job.ResourceRequirements().Requests, ) // Unbind the job from the node on which it was scheduled. @@ -759,7 +772,7 @@ func (s *Simulator) handleJobSucceeded(txn *jobdb.Txn, e *armadaevents.JobSuccee // Increase the successful job count for this jobTemplate. // If all jobs created from this template have succeeded, update dependent templates // and submit any templates for which this was the last dependency. - jobTemplate := s.jobTemplateByJobId[job.GetId()] + jobTemplate := s.jobTemplateByJobId[job.Id()] jobTemplate.NumberSuccessful++ if jobTemplate.Number == jobTemplate.NumberSuccessful { delete(s.activeJobTemplatesById, jobTemplate.Id) @@ -832,7 +845,7 @@ func (s *Simulator) handleJobRunPreempted(txn *jobdb.Txn, e *armadaevents.JobRun job := txn.GetById(jobId) // Submit a retry for this job. 
- jobTemplate := s.jobTemplateByJobId[job.GetId()] + jobTemplate := s.jobTemplateByJobId[job.Id()] retryJobId := util.ULID() resubmitTime := s.time.Add(s.generateRandomShiftedExponentialDuration(s.ClusterSpec.WorkflowManagerDelayDistribution)) s.pushEventSequence( diff --git a/internal/scheduler/simulator/simulator_test.go b/internal/scheduler/simulator/simulator_test.go index 67489faf352..264757139e7 100644 --- a/internal/scheduler/simulator/simulator_test.go +++ b/internal/scheduler/simulator/simulator_test.go @@ -10,17 +10,18 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/common/util" - schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/internal/scheduler/configuration" + "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" "github.com/armadaproject/armada/pkg/armadaevents" ) func TestSimulator(t *testing.T) { + enableFastForward := false + schedulerCyclePeriodSeconds := 10 tests := map[string]struct { clusterSpec *ClusterSpec workloadSpec *WorkloadSpec @@ -435,7 +436,7 @@ func TestSimulator(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - s, err := NewSimulator(tc.clusterSpec, tc.workloadSpec, tc.schedulingConfig) + s, err := NewSimulator(tc.clusterSpec, tc.workloadSpec, tc.schedulingConfig, enableFastForward, int((tc.simulatedTimeLimit + time.Hour).Minutes()), schedulerCyclePeriodSeconds) require.NoError(t, err) mc := NewMetricsCollector(s.StateTransitions()) actualEventSequences := make([]*armadaevents.EventSequence, 0, 128) @@ -470,8 +471,8 @@ func TestSimulator(t *testing.T) { if tc.expectedEventSequences != nil { require.Equal( t, - util.Map(tc.expectedEventSequences, func(eventSequence *armadaevents.EventSequence) string { return EventSequenceSummary(eventSequence) }), - util.Map(actualEventSequences, func(eventSequence *armadaevents.EventSequence) string { return EventSequenceSummary(eventSequence) }), + armadaslices.Map(tc.expectedEventSequences, func(eventSequence *armadaevents.EventSequence) string { return EventSequenceSummary(eventSequence) }), + armadaslices.Map(actualEventSequences, func(eventSequence *armadaevents.EventSequence) string { return EventSequenceSummary(eventSequence) }), "Expected:\n%s\nReceived:\n%s", EventSequencesSummary(tc.expectedEventSequences), EventSequencesSummary(actualEventSequences), diff --git a/internal/scheduler/simulator/test_utils.go b/internal/scheduler/simulator/test_utils.go index c44ba04a1e8..18d8a9d17b0 100644 --- a/internal/scheduler/simulator/test_utils.go +++ b/internal/scheduler/simulator/test_utils.go @@ -9,9 +9,9 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/armadaproject/armada/internal/armada/configuration" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/types" + "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/internal/scheduler/constraints" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/pkg/armadaevents" 
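The resubmission delay above is drawn from a shifted exponential distribution: a fixed minimum plus an exponentially distributed component. A standalone sketch of such a draw; the function name and parameters here are illustrative, and the simulator takes its actual parameters from the cluster spec's WorkflowManagerDelayDistribution:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// shiftedExponential draws minimum + Exp(mean), i.e. an exponentially
// distributed delay that can never be shorter than the minimum.
func shiftedExponential(rng *rand.Rand, minimum, mean time.Duration) time.Duration {
	// ExpFloat64 has mean 1, so scaling by mean gives an exponential with that mean.
	return minimum + time.Duration(rng.ExpFloat64()*float64(mean))
}

func main() {
	rng := rand.New(rand.NewSource(42)) // fixed seed for reproducibility
	for i := 0; i < 3; i++ {
		d := shiftedExponential(rng, 10*time.Second, time.Minute)
		fmt.Println(d.Round(time.Second))
	}
}
```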
@@ -45,7 +45,6 @@ func GetOneQueue10JobWorkload() *WorkloadSpec { func GetBasicSchedulingConfig() configuration.SchedulingConfig { return configuration.SchedulingConfig{ - NodeEvictionProbability: 1.0, PriorityClasses: map[string]types.PriorityClass{ "armada-default": { Priority: 30000, @@ -62,7 +61,7 @@ func GetBasicSchedulingConfig() configuration.SchedulingConfig { "cpu": 0.025, }, DominantResourceFairnessResourcesToConsider: []string{"cpu", "memory", "nvidia.com/gpu", "ephemeral-storage"}, - IndexedResources: []configuration.IndexedResource{ + IndexedResources: []configuration.ResourceType{ { Name: "cpu", Resolution: resource.MustParse("1"), diff --git a/internal/scheduler/simulator/writer.go b/internal/scheduler/simulator/writer.go index 95ad7a5f425..fb436cf9057 100644 --- a/internal/scheduler/simulator/writer.go +++ b/internal/scheduler/simulator/writer.go @@ -89,12 +89,12 @@ func (w *Writer) flattenStateTransition(flattenedStateTransitions []*FlattenedAr for i, event := range events.Events { // Assumes all supported events have an associated job associatedJob := jobsList[i] - prevSeenEvent := w.prevSeenEventByJobId[associatedJob.GetId()] + prevSeenEvent := w.prevSeenEventByJobId[associatedJob.Id()] // Resource requirements - cpuLimit := associatedJob.GetResourceRequirements().Requests[v1.ResourceCPU] - memoryLimit := associatedJob.GetResourceRequirements().Requests[v1.ResourceMemory] - ephemeralStorageLimit := associatedJob.GetResourceRequirements().Requests[v1.ResourceEphemeralStorage] - gpuLimit := associatedJob.GetResourceRequirements().Requests["nvidia.com/gpu"] + cpuLimit := associatedJob.ResourceRequirements().Requests[v1.ResourceCPU] + memoryLimit := associatedJob.ResourceRequirements().Requests[v1.ResourceMemory] + ephemeralStorageLimit := associatedJob.ResourceRequirements().Requests[v1.ResourceEphemeralStorage] + gpuLimit := associatedJob.ResourceRequirements().Requests["nvidia.com/gpu"] prevEventType := 0 prevEventTime := *event.Created @@ -107,10 +107,10 @@ func (w *Writer) flattenStateTransition(flattenedStateTransitions []*FlattenedAr Time: event.Created.Sub(startTime).Milliseconds(), Queue: events.Queue, JobSet: events.JobSetName, - JobId: associatedJob.GetId(), + JobId: associatedJob.Id(), RunIndex: len(associatedJob.AllRuns()) - 1, // Assumed to be related to latest run in simulation NumRuns: len(associatedJob.AllRuns()), - PriorityClass: associatedJob.GetPriorityClassName(), + PriorityClass: associatedJob.PriorityClassName(), PreviousEventType: prevEventType, EventType: w.encodeEvent(event), SecondsSinceLastEvent: event.Created.Sub(prevEventTime).Seconds(), @@ -120,10 +120,10 @@ func (w *Writer) flattenStateTransition(flattenedStateTransitions []*FlattenedAr EphemeralStorage: ephemeralStorageLimit.AsApproximateFloat64(), ExitCode: 0, }) - w.prevSeenEventByJobId[associatedJob.GetId()] = event + w.prevSeenEventByJobId[associatedJob.Id()] = event if associatedJob.Succeeded() || associatedJob.Failed() || associatedJob.Cancelled() { - delete(w.prevSeenEventByJobId, associatedJob.GetId()) + delete(w.prevSeenEventByJobId, associatedJob.Id()) } } diff --git a/internal/scheduler/submitcheck.go b/internal/scheduler/submitcheck.go index 438c53a445e..78843e0c13f 100644 --- a/internal/scheduler/submitcheck.go +++ b/internal/scheduler/submitcheck.go @@ -3,96 +3,85 @@ package scheduler import ( "fmt" "strings" - "sync" + "sync/atomic" "time" lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" "golang.org/x/exp/maps" - "k8s.io/apimachinery/pkg/util/clock" + 
"k8s.io/utils/clock" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/adapters" + "github.com/armadaproject/armada/internal/scheduler/configuration" + "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/database" - "github.com/armadaproject/armada/internal/scheduler/interfaces" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" - "github.com/armadaproject/armada/pkg/armadaevents" ) -type minimalExecutor struct { - nodeDb *nodedb.NodeDb - updateTime time.Time -} - type schedulingResult struct { isSchedulable bool + pools []string reason string } -const maxJobSchedulingResults = 10000 +// TODO: rename this to "executor" when we simplify pool assigner +type executorDetails struct { + pool string + nodeDb *nodedb.NodeDb + minimumJobSize schedulerobjects.ResourceList +} + +type executorState struct { + executorsById map[string]*executorDetails + jobSchedulingResultsCache *lru.Cache +} type SubmitScheduleChecker interface { - CheckApiJobs(es *armadaevents.EventSequence, defaultPriorityClass string) (bool, string) - CheckJobDbJobs(jobs []*jobdb.Job) (bool, string) + Check(ctx *armadacontext.Context, jobs []*jobdb.Job) (map[string]schedulingResult, error) +} + +// DummySubmitChecker is a SubmitScheduleChecker that allows every job +type DummySubmitChecker struct{} + +func (srv *DummySubmitChecker) Check(_ *armadacontext.Context, jobs []*jobdb.Job) (map[string]schedulingResult, error) { + results := make(map[string]schedulingResult, len(jobs)) + for _, job := range jobs { + results[job.Id()] = schedulingResult{isSchedulable: true} + } + return results, nil } type SubmitChecker struct { - executorTimeout time.Duration - priorityClasses map[string]types.PriorityClass - executorById map[string]minimalExecutor - priorities []int32 - indexedResources []configuration.IndexedResource - indexedTaints []string - indexedNodeLabels []string - wellKnownNodeTypes []configuration.WellKnownNodeType - executorRepository database.ExecutorRepository - clock clock.Clock - mu sync.Mutex - schedulingKeyGenerator *schedulerobjects.SchedulingKeyGenerator - jobSchedulingResultsCache *lru.Cache - ExecutorUpdateFrequency time.Duration + schedulingConfig configuration.SchedulingConfig + executorRepository database.ExecutorRepository + resourceListFactory *internaltypes.ResourceListFactory + state atomic.Pointer[executorState] + clock clock.Clock // can be overridden for testing } func NewSubmitChecker( - executorTimeout time.Duration, schedulingConfig configuration.SchedulingConfig, executorRepository database.ExecutorRepository, + resourceListFactory *internaltypes.ResourceListFactory, ) *SubmitChecker { - jobSchedulingResultsCache, err := lru.New(maxJobSchedulingResults) - if err != nil { - panic(errors.WithStack(err)) - } return &SubmitChecker{ - executorTimeout: executorTimeout, - priorityClasses: 
schedulingConfig.PriorityClasses, - executorById: map[string]minimalExecutor{}, - priorities: types.AllowedPriorities(schedulingConfig.PriorityClasses), - indexedResources: schedulingConfig.IndexedResources, - indexedTaints: schedulingConfig.IndexedTaints, - indexedNodeLabels: schedulingConfig.IndexedNodeLabels, - wellKnownNodeTypes: schedulingConfig.WellKnownNodeTypes, - executorRepository: executorRepository, - clock: clock.RealClock{}, - schedulingKeyGenerator: schedulerobjects.NewSchedulingKeyGenerator(), - jobSchedulingResultsCache: jobSchedulingResultsCache, - ExecutorUpdateFrequency: schedulingConfig.ExecutorUpdateFrequency, + schedulingConfig: schedulingConfig, + executorRepository: executorRepository, + resourceListFactory: resourceListFactory, + clock: clock.RealClock{}, } } -var stringInterner = stringinterner.New(1000) - func (srv *SubmitChecker) Run(ctx *armadacontext.Context) error { + ctx.Infof("Will refresh executor state every %s", srv.schedulingConfig.ExecutorUpdateFrequency) srv.updateExecutors(ctx) - - ticker := time.NewTicker(srv.ExecutorUpdateFrequency) + ticker := time.NewTicker(srv.schedulingConfig.ExecutorUpdateFrequency) for { select { case <-ctx.Done(): @@ -111,76 +100,52 @@ func (srv *SubmitChecker) updateExecutors(ctx *armadacontext.Context) { Error("Error fetching executors") return } - for _, executor := range executors { - nodeDb, err := srv.constructNodeDb(executor.Nodes) + ctx.Infof("Retrieved %d executors", len(executors)) + jobSchedulingResultsCache, err := lru.New(10000) + if err != nil { + // This should never happen as lru.New only returns an error if it is initialised with a negative size + panic(err) + } + + executorsById := map[string]*executorDetails{} + for _, ex := range executors { + nodeDb, err := srv.constructNodeDb(ex) if err == nil { - srv.mu.Lock() - srv.executorById[executor.Id] = minimalExecutor{ - nodeDb: nodeDb, - updateTime: executor.LastUpdateTime, - } - srv.mu.Unlock() - if err != nil { - logging. - WithStacktrace(ctx, err). - Errorf("Error constructing node db for executor %s", executor.Id) + executorsById[ex.Id] = &executorDetails{ + pool: ex.Pool, + nodeDb: nodeDb, + minimumJobSize: ex.MinimumJobSize, } } else { logging. WithStacktrace(ctx, err). - Warnf("Error clearing nodedb for executor %s", executor.Id) + Warnf("Error constructing nodedb for executor: %s", ex.Id) } } - - // Reset cache as the executors may have updated, changing what can be scheduled. - // Create a new schedulingKeyGenerator to get a new initial state. 
- srv.schedulingKeyGenerator = schedulerobjects.NewSchedulingKeyGenerator() - srv.jobSchedulingResultsCache.Purge() -} - -func (srv *SubmitChecker) CheckJobDbJobs(jobs []*jobdb.Job) (bool, string) { - return srv.check(schedulercontext.JobSchedulingContextsFromJobs(srv.priorityClasses, jobs)) + srv.state.Store(&executorState{ + executorsById: executorsById, + jobSchedulingResultsCache: jobSchedulingResultsCache, + }) } -func (srv *SubmitChecker) CheckApiJobs(es *armadaevents.EventSequence, defaultPriorityClass string) (bool, string) { - jobDb := jobdb.NewJobDb(srv.priorityClasses, defaultPriorityClass, stringInterner) - jobs := make([]*jobdb.Job, 0, len(es.Events)) - for _, event := range es.Events { - submitMsg := event.GetSubmitJob() - if submitMsg != nil { - schedInfo, err := adapters.SchedulingInfoFromSubmitJob(submitMsg, time.Now(), srv.priorityClasses) - if err != nil { - return false, err.Error() - } - job := jobDb.NewJob( - armadaevents.MustUlidStringFromProtoUuid(submitMsg.JobId), - es.JobSetName, - es.Queue, - submitMsg.Priority, - schedInfo, - true, - 0, - false, - false, - false, - 0) - jobs = append(jobs, job) - } +func (srv *SubmitChecker) Check(ctx *armadacontext.Context, jobs []*jobdb.Job) (map[string]schedulingResult, error) { + start := time.Now() + state := srv.state.Load() + if state == nil { + return nil, fmt.Errorf("executor state not loaded") } - return srv.check(schedulercontext.JobSchedulingContextsFromJobs(srv.priorityClasses, jobs)) -} -func (srv *SubmitChecker) check(jctxs []*schedulercontext.JobSchedulingContext) (bool, string) { + jobContexts := schedulercontext.JobSchedulingContextsFromJobs(srv.schedulingConfig.PriorityClasses, jobs) + results := make(map[string]schedulingResult, len(jobs)) + // First, check if all jobs can be scheduled individually. - for i, jctx := range jctxs { - schedulingResult := srv.getIndividualSchedulingResult(jctx) - if !schedulingResult.isSchedulable { - return schedulingResult.isSchedulable, fmt.Sprintf("%d-th job unschedulable:\n%s", i, schedulingResult.reason) - } + for _, jctx := range jobContexts { + results[jctx.JobId] = srv.getIndividualSchedulingResult(jctx, state) } + // Then, check if all gangs can be scheduled. 
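updateExecutors now builds a complete executorState (per-executor node DBs plus a fresh scheduling-results cache) and publishes it through an atomic pointer, so Check can read a consistent snapshot without holding a mutex. A minimal sketch of that swap pattern with stand-in types:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// state is an immutable snapshot: the refresher builds a new one and
// publishes it with Store, and readers take a consistent view with Load.
type state struct {
	executorsByID map[string]string // executor id -> pool, for illustration
}

type checker struct {
	state atomic.Pointer[state]
}

func (c *checker) refresh(executors map[string]string) {
	// Build the whole snapshot before publishing it; never mutate a
	// snapshot that readers may already hold.
	next := &state{executorsByID: executors}
	c.state.Store(next)
}

func (c *checker) check(id string) (string, error) {
	s := c.state.Load()
	if s == nil {
		return "", fmt.Errorf("executor state not loaded")
	}
	return s.executorsByID[id], nil
}

func main() {
	c := &checker{}
	if _, err := c.check("executor1"); err != nil {
		fmt.Println(err) // refresh has not run yet
	}
	c.refresh(map[string]string{"executor1": "cpu"})
	pool, _ := c.check("executor1")
	fmt.Println("executor1 is in pool", pool)
}
```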
for gangId, jctxs := range armadaslices.GroupByFunc( - jctxs, + jobContexts, func(jctx *schedulercontext.JobSchedulingContext) string { return jctx.GangInfo.Id }, @@ -189,22 +154,20 @@ func (srv *SubmitChecker) check(jctxs []*schedulercontext.JobSchedulingContext) continue } gctx := schedulercontext.NewGangSchedulingContext(jctxs) - if schedulingResult := srv.getSchedulingResult(gctx); !schedulingResult.isSchedulable { - return schedulingResult.isSchedulable, fmt.Sprintf("gang %s is unschedulable:\n%s", gangId, schedulingResult.reason) + if result := srv.getSchedulingResult(gctx, state); !result.isSchedulable { + for _, jctx := range gctx.JobSchedulingContexts { + results[jctx.JobId] = result + } } } - return true, "" + ctx.Infof("Checked %d jobs in %s", len(jobs), time.Since(start)) + return results, nil } -func (srv *SubmitChecker) getIndividualSchedulingResult(jctx *schedulercontext.JobSchedulingContext) schedulingResult { - schedulingKey, ok := jctx.Job.GetSchedulingKey() - if !ok { - srv.mu.Lock() - schedulingKey = interfaces.SchedulingKeyFromLegacySchedulerJob(srv.schedulingKeyGenerator, jctx.Job) - srv.mu.Unlock() - } +func (srv *SubmitChecker) getIndividualSchedulingResult(jctx *schedulercontext.JobSchedulingContext, state *executorState) schedulingResult { + schedulingKey := jctx.Job.SchedulingKey() - if obj, ok := srv.jobSchedulingResultsCache.Get(schedulingKey); ok { + if obj, ok := state.jobSchedulingResultsCache.Get(schedulingKey); ok { return obj.(schedulingResult) } @@ -216,45 +179,47 @@ func (srv *SubmitChecker) getIndividualSchedulingResult(jctx *schedulercontext.J }() gctx := schedulercontext.NewGangSchedulingContext([]*schedulercontext.JobSchedulingContext{jctx}) - result := srv.getSchedulingResult(gctx) + result := srv.getSchedulingResult(gctx, state) - srv.jobSchedulingResultsCache.Add(schedulingKey, result) + state.jobSchedulingResultsCache.Add(schedulingKey, result) return result } // Check if a set of jobs can be scheduled onto some cluster. -func (srv *SubmitChecker) getSchedulingResult(gctx *schedulercontext.GangSchedulingContext) schedulingResult { - // Skip submit checks if this batch contains less than the min cardinality jobs. - // Reason: - // - We need to support submitting gang jobs across batches and allow for gang jobs to queue until min cardinality is satisfied. - // - We cannot verify if min cardinality jobs are schedulable unless we are given at least that many in a single batch. - // - A side effect of this is that users can submit jobs in gangs that skip this check and are never schedulable, which will be handled via queue-ttl. 
- if len(gctx.JobSchedulingContexts) < gctx.GangInfo.MinimumCardinality { - return schedulingResult{isSchedulable: true, reason: ""} - } +// TODO: there are a number of things this won't catch: +// - Node Uniformity Label (although it will work if this is per cluster) +// - Gang jobs that will use more than the allowed capacity limit +func (srv *SubmitChecker) getSchedulingResult(gctx *schedulercontext.GangSchedulingContext, state *executorState) schedulingResult { + sucessfulPools := map[string]bool{} + var sb strings.Builder + for id, ex := range state.executorsById { - // Make a shallow copy to avoid holding the lock and - // preventing updating NodeDbs while checking if jobs can be scheduled - srv.mu.Lock() - executorById := maps.Clone(srv.executorById) - srv.mu.Unlock() - executorById = srv.filterStaleExecutors(executorById) - if len(executorById) == 0 { - return schedulingResult{isSchedulable: false, reason: "no executor clusters available"} - } + // If we already know we can schedule on this pool then we are good + if sucessfulPools[ex.pool] { + continue + } - isSchedulable := false - var sb strings.Builder - for id, executor := range executorById { - nodeDb := executor.nodeDb - txn := nodeDb.Txn(true) - // TODO: This doesn't account for per-queue limits or the NodeUniformityLabel. - // We should create a GangScheduler for this instead. - ok, err := nodeDb.ScheduleManyWithTxn(txn, gctx) - txn.Abort() + // if job doesn't meet the minimum resource requirements we can skip + meetsMinimum := true + for _, jctx := range gctx.JobSchedulingContexts { + requests := jctx.PodRequirements.ResourceRequirements.Requests + if ok, _ := constraints.RequestsAreLargeEnough(schedulerobjects.ResourceListFromV1ResourceList(requests).Resources, ex.minimumJobSize.Resources); !ok { + meetsMinimum = false + } + } - isSchedulable = isSchedulable || ok + if !meetsMinimum { + sb.WriteString("Job size is below the minimum required by the cluster") + sb.WriteString("\n") + sb.WriteString("---") + sb.WriteString("\n") + continue + } + + txn := ex.nodeDb.Txn(true) + ok, err := ex.nodeDb.ScheduleManyWithTxn(txn, gctx) + txn.Abort() sb.WriteString(id) if err != nil { @@ -263,6 +228,11 @@ func (srv *SubmitChecker) getSchedulingResult(gctx *schedulercontext.GangSchedul continue } + if ok { + sucessfulPools[ex.pool] = true + continue + } + numSuccessfullyScheduled := 0 for _, jctx := range gctx.JobSchedulingContexts { if jctx.PodSchedulingContext.IsSuccessful() { @@ -284,48 +254,38 @@ func (srv *SubmitChecker) getSchedulingResult(gctx *schedulercontext.GangSchedul sb.WriteString( fmt.Sprintf( ": %d out of %d pods schedulable (minCardinality %d)\n", - numSuccessfullyScheduled, len(gctx.JobSchedulingContexts), gctx.GangInfo.MinimumCardinality, + numSuccessfullyScheduled, len(gctx.JobSchedulingContexts), gctx.GangInfo.Cardinality, ), ) } } - return schedulingResult{isSchedulable: isSchedulable, reason: sb.String()} -} - -func (srv *SubmitChecker) filterStaleExecutors(executorsById map[string]minimalExecutor) map[string]minimalExecutor { - rv := make(map[string]minimalExecutor) - for id, executor := range executorsById { - if srv.clock.Since(executor.updateTime) < srv.executorTimeout { - rv[id] = executor - } + if len(sucessfulPools) > 0 { + return schedulingResult{isSchedulable: true, pools: maps.Keys(sucessfulPools)} } - return rv + return schedulingResult{isSchedulable: false, reason: sb.String()} } -func (srv *SubmitChecker) constructNodeDb(nodes []*schedulerobjects.Node) (*nodedb.NodeDb, error) { +func (srv 
*SubmitChecker) constructNodeDb(executor *schedulerobjects.Executor) (*nodedb.NodeDb, error) { nodeDb, err := nodedb.NewNodeDb( - srv.priorityClasses, - 0, - srv.indexedResources, - srv.indexedTaints, - srv.indexedNodeLabels, - srv.wellKnownNodeTypes, - stringinterner.New(512), + srv.schedulingConfig.PriorityClasses, + srv.schedulingConfig.IndexedResources, + srv.schedulingConfig.IndexedTaints, + srv.schedulingConfig.IndexedNodeLabels, + srv.schedulingConfig.WellKnownNodeTypes, + stringinterner.New(10000), + srv.resourceListFactory, ) if err != nil { return nil, err } txn := nodeDb.Txn(true) defer txn.Abort() - for _, node := range nodes { + for _, node := range executor.Nodes { if err := nodeDb.CreateAndInsertWithJobDbJobsWithTxn(txn, nil, node); err != nil { return nil, err } } txn.Commit() - if err != nil { - return nil, err - } err = nodeDb.ClearAllocated() if err != nil { return nil, err diff --git a/internal/scheduler/submitcheck_test.go b/internal/scheduler/submitcheck_test.go index 75baaa1dafb..565a4562629 100644 --- a/internal/scheduler/submitcheck_test.go +++ b/internal/scheduler/submitcheck_test.go @@ -5,171 +5,193 @@ import ( "time" "github.com/golang/mock/gomock" - "github.com/sirupsen/logrus" + "github.com/google/uuid" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/clock" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + "k8s.io/apimachinery/pkg/api/resource" + clock "k8s.io/utils/clock/testing" - "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" - armadaslices "github.com/armadaproject/armada/internal/common/slices" + "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/internal/scheduler/jobdb" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/testfixtures" - "github.com/armadaproject/armada/pkg/armadaevents" ) func TestSubmitChecker_CheckJobDbJobs(t *testing.T) { defaultTimeout := 15 * time.Minute baseTime := time.Now().UTC() - expiredTime := baseTime.Add(-defaultTimeout).Add(-1 * time.Second) + // expiredTime := baseTime.Add(-defaultTimeout).Add(-1 * time.Second) + smallJob1 := testfixtures.Test1Cpu4GiJob("queue", testfixtures.PriorityClass1) + smallJob2 := testfixtures.Test1Cpu4GiJob("queue", testfixtures.PriorityClass1) + largeJob1 := testfixtures.Test32Cpu256GiJob("queue", testfixtures.PriorityClass1) + + // This Gang job will fit + smallGangJob := testfixtures. + WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("queue", testfixtures.PriorityClass1, 2)) + + // This gang job doesn't fit as we only have room for three of these jobs + largeGangJob := testfixtures. 
+ WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("queue", testfixtures.PriorityClass1, 4)) tests := map[string]struct { executorTimout time.Duration config configuration.SchedulingConfig executors []*schedulerobjects.Executor - job *jobdb.Job - expectPass bool + jobs []*jobdb.Job + expectedResult map[string]schedulingResult }{ - "one job schedules": { - executorTimout: defaultTimeout, - config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, - job: testfixtures.Test1Cpu4GiJob("queue", testfixtures.PriorityClass1), - expectPass: true, - }, - "no jobs schedule due to resources": { + "One job schedulable": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, - job: testfixtures.Test32Cpu256GiJob("queue", testfixtures.PriorityClass1), - expectPass: false, + jobs: []*jobdb.Job{smallJob1}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, - "no jobs schedule due to selector": { + "One job schedulable, multiple executors": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, - job: testfixtures.WithNodeSelectorJob(map[string]string{"foo": "bar"}, testfixtures.Test1Cpu4GiJob("queue", testfixtures.PriorityClass1)), - expectPass: false, - }, - "no jobs schedule due to executor timeout": { - executorTimout: defaultTimeout, - config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(expiredTime)}, - job: testfixtures.Test1Cpu4GiJob("queue", testfixtures.PriorityClass1), - expectPass: false, + executors: []*schedulerobjects.Executor{ + testfixtures.TestExecutor(baseTime), + testfixtures.TestExecutor(baseTime), + }, + jobs: []*jobdb.Job{smallJob1}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, - "multiple executors, 1 expired": { - executorTimout: defaultTimeout, - config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(expiredTime), testfixtures.TestExecutor(baseTime)}, - job: testfixtures.Test1Cpu4GiJob("queue", testfixtures.PriorityClass1), - expectPass: true, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), 5*time.Second) - defer cancel() - - ctrl := gomock.NewController(t) - mockExecutorRepo := schedulermocks.NewMockExecutorRepository(ctrl) - mockExecutorRepo.EXPECT().GetExecutors(ctx).Return(tc.executors, nil).AnyTimes() - fakeClock := clock.NewFakeClock(baseTime) - submitCheck := NewSubmitChecker(tc.executorTimout, tc.config, mockExecutorRepo) - submitCheck.clock = fakeClock - submitCheck.updateExecutors(ctx) - isSchedulable, reason := submitCheck.CheckJobDbJobs([]*jobdb.Job{tc.job}) - assert.Equal(t, tc.expectPass, isSchedulable) - if !tc.expectPass { - assert.NotEqual(t, "", reason) - } - logrus.Info(reason) - }) - } -} - -func TestSubmitChecker_TestCheckApiJobs(t *testing.T) { - defaultTimeout := 15 * time.Minute - testfixtures.BaseTime = time.Now().UTC() - expiredTime := testfixtures.BaseTime.Add(-defaultTimeout).Add(-1 * time.Second) - - tests := map[string]struct { - executorTimout time.Duration - config configuration.SchedulingConfig - executors 
[]*schedulerobjects.Executor - jobs []*armadaevents.SubmitJob - expectPass bool - }{ - "one job schedules": { + "One job schedulable, multiple executors but only fits on one": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test1CoreSubmitMsg()}, - expectPass: true, + executors: []*schedulerobjects.Executor{ + testfixtures.TestExecutor(baseTime), + { + Id: uuid.NewString(), + Pool: "cpu", + LastUpdateTime: baseTime, + Nodes: nil, + }, + }, + jobs: []*jobdb.Job{smallJob1}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, - "multiple jobs schedule": { + "One job schedulable, multiple pools": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test1CoreSubmitMsg(), testfixtures.Test1CoreSubmitMsg()}, - expectPass: true, + executors: []*schedulerobjects.Executor{ + testfixtures.TestExecutor(baseTime), + { + Id: uuid.NewString(), + Pool: "cpu2", + LastUpdateTime: baseTime, + Nodes: testfixtures.TestCluster(), + }, + }, + jobs: []*jobdb.Job{smallJob1}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu", "cpu2"}}, + }, }, - "first job schedules, second doesn't": { + "One job schedulable, minimum job size respected": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test1CoreSubmitMsg(), testfixtures.Test100CoreSubmitMsg()}, - expectPass: false, + executors: []*schedulerobjects.Executor{ + testfixtures.TestExecutor(baseTime), + { + Id: uuid.NewString(), + Pool: "cpu2", + LastUpdateTime: baseTime, + MinimumJobSize: schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("100"), + }, + }, + Nodes: testfixtures.TestCluster(), + }, + }, + jobs: []*jobdb.Job{smallJob1}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, - "no jobs schedule due to resources": { + "Two jobs schedules": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test100CoreSubmitMsg()}, - expectPass: false, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: []*jobdb.Job{smallJob1, smallJob2}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + smallJob2.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, - "no jobs schedule due to selector": { + "One job schedulable, one not due to resources": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test1CoreSubmitMsgWithNodeSelector(map[string]string{"foo": "bar"})}, - expectPass: false, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: []*jobdb.Job{smallJob1, largeJob1}, + 
expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + largeJob1.Id(): {isSchedulable: false}, + }, }, - "no jobs schedule due to executor timeout": { + "No jobs schedulable due to resources": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(expiredTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test1CoreSubmitMsg()}, - expectPass: false, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: []*jobdb.Job{largeJob1}, + expectedResult: map[string]schedulingResult{ + largeJob1.Id(): {isSchedulable: false}, + }, }, - "multiple executors, 1 expired": { + "No jobs schedulable due to selector": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(expiredTime), testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: []*armadaevents.SubmitJob{testfixtures.Test1CoreSubmitMsg()}, - expectPass: true, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: []*jobdb.Job{testfixtures.WithNodeSelectorJob(map[string]string{"foo": "bar"}, smallJob1)}, + expectedResult: map[string]schedulingResult{ + smallJob1.Id(): {isSchedulable: false}, + }, }, - "gang job all jobs fit": { + "Gang Schedules": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: testfixtures.TestNSubmitMsgGang(5), - expectPass: true, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: smallGangJob, + expectedResult: map[string]schedulingResult{ + smallGangJob[0].Id(): {isSchedulable: true, pools: []string{"cpu"}}, + smallGangJob[1].Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, - "gang job all jobs don't fit": { + "Individual jobs fit but gang doesn't": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: testfixtures.TestNSubmitMsgGang(100), - expectPass: false, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: largeGangJob, + expectedResult: map[string]schedulingResult{ + largeGangJob[0].Id(): {isSchedulable: false}, + largeGangJob[1].Id(): {isSchedulable: false}, + largeGangJob[2].Id(): {isSchedulable: false}, + largeGangJob[3].Id(): {isSchedulable: false}, + }, }, - "Less than min cardinality gang jobs in a batch skips submit check": { + "One job fits, one gang doesn't, out of order": { executorTimout: defaultTimeout, config: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(testfixtures.BaseTime)}, - jobs: testfixtures.TestNSubmitMsgGangLessThanMinCardinality(5), - expectPass: true, + executors: []*schedulerobjects.Executor{testfixtures.TestExecutor(baseTime)}, + jobs: []*jobdb.Job{largeGangJob[0], smallJob1, largeGangJob[1], largeGangJob[2], largeGangJob[3]}, + expectedResult: map[string]schedulingResult{ + largeGangJob[0].Id(): {isSchedulable: false}, + largeGangJob[1].Id(): {isSchedulable: false}, + largeGangJob[2].Id(): {isSchedulable: false}, + largeGangJob[3].Id(): {isSchedulable: false}, + smallJob1.Id(): {isSchedulable: true, pools: []string{"cpu"}}, + }, }, } for name, tc := range tests { @@ -180,22 +202,23 @@ func 
TestSubmitChecker_TestCheckApiJobs(t *testing.T) { ctrl := gomock.NewController(t) mockExecutorRepo := schedulermocks.NewMockExecutorRepository(ctrl) mockExecutorRepo.EXPECT().GetExecutors(ctx).Return(tc.executors, nil).AnyTimes() - fakeClock := clock.NewFakeClock(testfixtures.BaseTime) - submitCheck := NewSubmitChecker(tc.executorTimout, tc.config, mockExecutorRepo) + fakeClock := clock.NewFakeClock(baseTime) + submitCheck := NewSubmitChecker(tc.config, mockExecutorRepo, testfixtures.TestResourceListFactory) submitCheck.clock = fakeClock submitCheck.updateExecutors(ctx) - events := armadaslices.Map(tc.jobs, func(s *armadaevents.SubmitJob) *armadaevents.EventSequence_Event { - return &armadaevents.EventSequence_Event{ - Event: &armadaevents.EventSequence_Event_SubmitJob{SubmitJob: s}, - } - }) - es := &armadaevents.EventSequence{Events: events} - result, msg := submitCheck.CheckApiJobs(es, testfixtures.TestDefaultPriorityClass) - assert.Equal(t, tc.expectPass, result) - if !tc.expectPass { - assert.NotEqual(t, "", msg) + results, err := submitCheck.Check(ctx, tc.jobs) + require.NoError(t, err) + require.Equal(t, len(tc.expectedResult), len(results)) + for id, expected := range tc.expectedResult { + actualResult, ok := results[id] + require.True(t, ok) + actualResult.reason = "" // clear reason as we don't test this + + // sort pools as we don't care about order + slices.Sort(actualResult.pools) + slices.Sort(expected.pools) + assert.Equal(t, expected, actualResult) } - logrus.Info(msg) }) } } diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go index 7c474eb7349..dac90abbf39 100644 --- a/internal/scheduler/testfixtures/testfixtures.go +++ b/internal/scheduler/testfixtures/testfixtures.go @@ -16,14 +16,16 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" schedulerconfiguration "github.com/armadaproject/armada/internal/scheduler/configuration" + "github.com/armadaproject/armada/internal/scheduler/internaltypes" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/armadaevents" - "github.com/armadaproject/armada/pkg/client/queue" ) const ( @@ -39,33 +41,31 @@ const ( ) var ( - BaseTime, _ = time.Parse("2006-01-02T15:04:05.000Z", "2022-03-01T15:04:05.000Z") - TestPriorityClasses = map[string]types.PriorityClass{ + TestResourceListFactory = MakeTestResourceListFactory() + BaseTime, _ = time.Parse("2006-01-02T15:04:05.000Z", "2022-03-01T15:04:05.000Z") + TestPriorityClasses = map[string]types.PriorityClass{ PriorityClass0: {Priority: 0, Preemptible: true}, PriorityClass1: {Priority: 1, Preemptible: true}, PriorityClass2: {Priority: 2, Preemptible: true}, PriorityClass2NonPreemptible: {Priority: 2, Preemptible: false}, PriorityClass3: {Priority: 3, Preemptible: false}, + "armada-preemptible-away": {Priority: 30000, Preemptible: true, AwayNodeTypes: []types.AwayNodeType{{Priority: 29000, WellKnownNodeTypeName: "gpu"}}}, + "armada-preemptible": {Priority: 30000, Preemptible: true}, } - TestDefaultPriorityClass = PriorityClass3 - TestPriorities = []int32{0, 1, 2, 3} - 
TestMaxExtraNodesToConsider uint = 1 - TestResources = []configuration.IndexedResource{ + TestDefaultPriorityClass = PriorityClass3 + TestPriorities = []int32{0, 1, 2, 3} + TestResources = []schedulerconfiguration.ResourceType{ {Name: "cpu", Resolution: resource.MustParse("1")}, {Name: "memory", Resolution: resource.MustParse("128Mi")}, - {Name: "gpu", Resolution: resource.MustParse("1")}, + {Name: "nvidia.com/gpu", Resolution: resource.MustParse("1")}, } - TestResourceNames = util.Map( + TestResourceNames = slices.Map( TestResources, - func(v configuration.IndexedResource) string { return v.Name }, - ) - TestIndexedResourceResolutionMillis = util.Map( - TestResources, - func(v configuration.IndexedResource) int64 { return v.Resolution.MilliValue() }, + func(v schedulerconfiguration.ResourceType) string { return v.Name }, ) TestIndexedTaints = []string{"largeJobsOnly", "gpu"} TestIndexedNodeLabels = []string{"largeJobsOnly", "gpu"} - TestWellKnownNodeTypes = []configuration.WellKnownNodeType{ + TestWellKnownNodeTypes = []schedulerconfiguration.WellKnownNodeType{ { Name: "gpu", Taints: []v1.Taint{{Key: "gpu", Value: "true", Effect: v1.TaintEffectNoSchedule}}, @@ -78,11 +78,11 @@ var ( // We use the all-zeros key here to ensure scheduling keys are cosnsitent between tests. SchedulingKeyGenerator = schedulerobjects.NewSchedulingKeyGeneratorWithKey(make([]byte, 32)) // Used for job creation. - JobDb = NewJobDb() + JobDb = NewJobDb(TestResourceListFactory) ) func NewJobDbWithJobs(jobs []*jobdb.Job) *jobdb.JobDb { - jobDb := NewJobDb() + jobDb := NewJobDb(TestResourceListFactory) txn := jobDb.WriteTxn() defer txn.Abort() if err := txn.Upsert(jobs); err != nil { @@ -93,12 +93,13 @@ func NewJobDbWithJobs(jobs []*jobdb.Job) *jobdb.JobDb { } // NewJobDb returns a new default jobDb with defaults to use in tests. -func NewJobDb() *jobdb.JobDb { +func NewJobDb(resourceListFactory *internaltypes.ResourceListFactory) *jobdb.JobDb { jobDb := jobdb.NewJobDbWithSchedulingKeyGenerator( TestPriorityClasses, TestDefaultPriorityClass, SchedulingKeyGenerator, stringinterner.New(1024), + resourceListFactory, ) // Mock out the clock and uuid provider to ensure consistent ids and timestamps are generated. 
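These fixtures keep tests deterministic by injecting a fake clock (k8s.io/utils/clock/testing in the submit-check tests, SetClock on the jobDb here). A dependency-free sketch of the same idea, with an illustrative Clock interface rather than the real one:

```go
package main

import (
	"fmt"
	"time"
)

// Clock is the narrow interface the code under test depends on.
type Clock interface {
	Now() time.Time
}

// realClock is what production code would use.
type realClock struct{}

func (realClock) Now() time.Time { return time.Now() }

// fakeClock returns a fixed, controllable time so tests are deterministic.
type fakeClock struct{ t time.Time }

func (f *fakeClock) Now() time.Time          { return f.t }
func (f *fakeClock) Step(d time.Duration)    { f.t = f.t.Add(d) }

type jobDb struct{ clock Clock }

func (db *jobDb) newJobTimestamp() time.Time { return db.clock.Now() }

func main() {
	base := time.Date(2022, 3, 1, 15, 4, 5, 0, time.UTC)
	fc := &fakeClock{t: base}
	db := &jobDb{clock: fc}
	fmt.Println(db.newJobTimestamp()) // always 2022-03-01 15:04:05 +0000 UTC
	fc.Step(time.Minute)
	fmt.Println(db.newJobTimestamp()) // advanced exactly one minute
}
```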
jobDb.SetClock(NewMockPassiveClock()) @@ -106,6 +107,40 @@ func NewJobDb() *jobdb.JobDb { return jobDb } +func NewJob( + jobId string, + jobSet string, + queue string, + priority uint32, + schedulingInfo *schedulerobjects.JobSchedulingInfo, + queued bool, + queuedVersion int32, + cancelRequested bool, + cancelByJobSetRequested bool, + cancelled bool, + created int64, + validated bool, +) *jobdb.Job { + job, err := JobDb.NewJob(jobId, + jobSet, + queue, + priority, + schedulingInfo, + queued, + queuedVersion, + cancelRequested, + cancelByJobSetRequested, + cancelled, + created, + validated, + []string{}, + ) + if err != nil { + panic(err) + } + return job +} + func IntRange(a, b int) []int { rv := make([]int, b-a+1) for i := range rv { @@ -122,17 +157,14 @@ func Repeat[T any](v T, n int) []T { return rv } -func TestSchedulingConfig() configuration.SchedulingConfig { - return configuration.SchedulingConfig{ +func TestSchedulingConfig() schedulerconfiguration.SchedulingConfig { + return schedulerconfiguration.SchedulingConfig{ PriorityClasses: maps.Clone(TestPriorityClasses), DefaultPriorityClassName: TestDefaultPriorityClass, - NodeEvictionProbability: 1.0, - NodeOversubscriptionEvictionProbability: 1.0, MaximumSchedulingRate: math.Inf(1), MaximumSchedulingBurst: math.MaxInt, MaximumPerQueueSchedulingRate: math.Inf(1), MaximumPerQueueSchedulingBurst: math.MaxInt, - MaxExtraNodesToConsider: TestMaxExtraNodesToConsider, IndexedResources: TestResources, IndexedNodeLabels: TestIndexedNodeLabels, IndexedTaints: TestIndexedTaints, @@ -140,40 +172,31 @@ func TestSchedulingConfig() configuration.SchedulingConfig { DominantResourceFairnessResourcesToConsider: TestResourceNames, ExecutorTimeout: 15 * time.Minute, MaxUnacknowledgedJobsPerExecutor: math.MaxInt, + SupportedResourceTypes: GetTestSupportedResourceTypes(), } } -func WithMaxUnacknowledgedJobsPerExecutorConfig(v uint, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithMaxUnacknowledgedJobsPerExecutorConfig(v uint, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.MaxUnacknowledgedJobsPerExecutor = v return config } -func WithProtectedFractionOfFairShareConfig(v float64, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithProtectedFractionOfFairShareConfig(v float64, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.ProtectedFractionOfFairShare = v return config } -func WithNodeEvictionProbabilityConfig(p float64, config configuration.SchedulingConfig) configuration.SchedulingConfig { - config.NodeEvictionProbability = p - return config -} - -func WithNodeOversubscriptionEvictionProbabilityConfig(p float64, config configuration.SchedulingConfig) configuration.SchedulingConfig { - config.NodeOversubscriptionEvictionProbability = p - return config -} - -func WithRoundLimitsConfig(limits map[string]float64, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithRoundLimitsConfig(limits map[string]float64, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.MaximumResourceFractionToSchedule = limits return config } -func WithRoundLimitsPoolConfig(limits map[string]map[string]float64, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithRoundLimitsPoolConfig(limits map[string]map[string]float64, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { 
config.MaximumResourceFractionToScheduleByPool = limits return config } -func WithPerPriorityLimitsConfig(limits map[string]map[string]float64, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithPerPriorityLimitsConfig(limits map[string]map[string]float64, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { for priorityClassName, limit := range limits { priorityClass, ok := config.PriorityClasses[priorityClassName] if !ok { @@ -190,39 +213,39 @@ func WithPerPriorityLimitsConfig(limits map[string]map[string]float64, config co return config } -func WithIndexedResourcesConfig(indexResources []configuration.IndexedResource, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithIndexedResourcesConfig(indexResources []schedulerconfiguration.ResourceType, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.IndexedResources = indexResources return config } -func WithGlobalSchedulingRateLimiterConfig(maximumSchedulingRate float64, maximumSchedulingBurst int, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithGlobalSchedulingRateLimiterConfig(maximumSchedulingRate float64, maximumSchedulingBurst int, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.MaximumSchedulingRate = maximumSchedulingRate config.MaximumSchedulingBurst = maximumSchedulingBurst return config } -func WithPerQueueSchedulingLimiterConfig(maximumPerQueueSchedulingRate float64, maximumPerQueueSchedulingBurst int, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithPerQueueSchedulingLimiterConfig(maximumPerQueueSchedulingRate float64, maximumPerQueueSchedulingBurst int, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.MaximumPerQueueSchedulingRate = maximumPerQueueSchedulingRate config.MaximumPerQueueSchedulingBurst = maximumPerQueueSchedulingBurst return config } -func WithMaxLookbackPerQueueConfig(n uint, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithMaxLookbackPerQueueConfig(n uint, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.MaxQueueLookback = n return config } -func WithIndexedTaintsConfig(indexedTaints []string, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithIndexedTaintsConfig(indexedTaints []string, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.IndexedTaints = append(config.IndexedTaints, indexedTaints...) return config } -func WithIndexedNodeLabelsConfig(indexedNodeLabels []string, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithIndexedNodeLabelsConfig(indexedNodeLabels []string, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.IndexedNodeLabels = append(config.IndexedNodeLabels, indexedNodeLabels...) 
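The testfixtures helpers above all follow the same shape: take a value and a SchedulingConfig by value, set one field, and return the modified copy, so tests can stack them around TestSchedulingConfig(). A minimal, self-contained sketch of that pattern (the SchedulingConfig here is a simplified stand-in, not the real schedulerconfiguration type):

```go
package main

import "fmt"

// Simplified stand-in for schedulerconfiguration.SchedulingConfig, used only
// to illustrate the With*Config helper pattern from testfixtures.
type SchedulingConfig struct {
	MaxQueueLookback uint
	IndexedTaints    []string
}

// Each helper mutates one field on its copy of the config and returns it,
// so helpers compose by nesting.
func WithMaxQueueLookbackConfig(n uint, config SchedulingConfig) SchedulingConfig {
	config.MaxQueueLookback = n
	return config
}

func WithIndexedTaintsConfig(taints []string, config SchedulingConfig) SchedulingConfig {
	config.IndexedTaints = append(config.IndexedTaints, taints...)
	return config
}

func main() {
	base := SchedulingConfig{MaxQueueLookback: 1000}
	cfg := WithIndexedTaintsConfig([]string{"gpu"}, WithMaxQueueLookbackConfig(3, base))
	fmt.Printf("%+v\n", cfg) // {MaxQueueLookback:3 IndexedTaints:[gpu]}
}
```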
return config } -func WithMaxQueueLookbackConfig(maxQueueLookback uint, config configuration.SchedulingConfig) configuration.SchedulingConfig { +func WithMaxQueueLookbackConfig(maxQueueLookback uint, config schedulerconfiguration.SchedulingConfig) schedulerconfiguration.SchedulingConfig { config.MaxQueueLookback = maxQueueLookback return config } @@ -343,6 +366,7 @@ func WithNodeSelectorJobs(selector map[string]string, jobs []*jobdb.Job) []*jobd } func WithNodeSelectorJob(selector map[string]string, job *jobdb.Job) *jobdb.Job { + job = job.DeepCopy() for _, req := range job.JobSchedulingInfo().GetObjectRequirements() { req.GetPodRequirements().NodeSelector = maps.Clone(selector) } @@ -353,21 +377,7 @@ func WithGangAnnotationsJobs(jobs []*jobdb.Job) []*jobdb.Job { gangId := uuid.NewString() gangCardinality := fmt.Sprintf("%d", len(jobs)) return WithAnnotationsJobs( - map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality, configuration.GangMinimumCardinalityAnnotation: gangCardinality}, - jobs, - ) -} - -func WithGangAnnotationsAndMinCardinalityJobs(minimumCardinality int, jobs []*jobdb.Job) []*jobdb.Job { - gangId := uuid.NewString() - gangCardinality := fmt.Sprintf("%d", len(jobs)) - gangMinCardinality := fmt.Sprintf("%d", minimumCardinality) - return WithAnnotationsJobs( - map[string]string{ - configuration.GangIdAnnotation: gangId, - configuration.GangCardinalityAnnotation: gangCardinality, - configuration.GangMinimumCardinalityAnnotation: gangMinCardinality, - }, + map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality}, jobs, ) } @@ -435,7 +445,7 @@ func extractPriority(priorityClassName string) int32 { func TestJob(queue string, jobId ulid.ULID, priorityClassName string, req *schedulerobjects.PodRequirements) *jobdb.Job { created := jobTimestamp.Add(1) submitTime := time.Time{}.Add(time.Millisecond * time.Duration(created)) - return JobDb.NewJob( + job, _ := JobDb.NewJob( jobId.String(), TestJobset, queue, @@ -458,7 +468,10 @@ func TestJob(queue string, jobId ulid.ULID, priorityClassName string, req *sched false, false, created, + false, + []string{}, ) + return job } func Test1Cpu4GiJob(queue string, priorityClassName string) *jobdb.Job { @@ -595,9 +608,9 @@ func Test1GpuPodReqs(queue string, jobId ulid.ULID, priority int32) *schedulerob jobId, priority, v1.ResourceList{ - "cpu": resource.MustParse("8"), - "memory": resource.MustParse("128Gi"), - "gpu": resource.MustParse("1"), + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("128Gi"), + "nvidia.com/gpu": resource.MustParse("1"), }, ) req.Tolerations = []v1.Toleration{ @@ -706,8 +719,8 @@ func N8GpuNodes(n int, priorities []int32) []*schedulerobjects.Node { return rv } -func SingleQueuePriorityOne(name string) []queue.Queue { - return []queue.Queue{{Name: name, PriorityFactor: 1.0}} +func SingleQueuePriorityOne(name string) []*api.Queue { + return []*api.Queue{{Name: name, PriorityFactor: 1.0}} } func TestNode(priorities []int32, resources map[string]resource.Quantity) *schedulerobjects.Node { @@ -756,9 +769,9 @@ func Test8GpuNode(priorities []int32) *schedulerobjects.Node { node := TestNode( priorities, map[string]resource.Quantity{ - "cpu": resource.MustParse("64"), - "memory": resource.MustParse("1024Gi"), - "gpu": resource.MustParse("8"), + "cpu": resource.MustParse("64"), + "memory": resource.MustParse("1024Gi"), + "nvidia.com/gpu": resource.MustParse("8"), }, ) node.Labels["gpu"] = "true" @@ 
-782,15 +795,15 @@ func Test1Node32CoreExecutor(executorId string) *schedulerobjects.Executor { } } -func MakeTestQueue() queue.Queue { - return queue.Queue{ +func MakeTestQueue() *api.Queue { + return &api.Queue{ Name: TestQueue, PriorityFactor: 100, } } func TestQueuedJobDbJob() *jobdb.Job { - return JobDb.NewJob( + job, _ := JobDb.NewJob( util.NewULID(), TestJobset, TestQueue, @@ -812,21 +825,10 @@ func TestQueuedJobDbJob() *jobdb.Job { false, false, BaseTime.UnixNano(), + false, + []string{}, ) -} - -func WithJobDbJobPodRequirements(job *jobdb.Job, reqs *schedulerobjects.PodRequirements) *jobdb.Job { - return job.WithJobSchedulingInfo(&schedulerobjects.JobSchedulingInfo{ - PriorityClassName: job.JobSchedulingInfo().PriorityClassName, - SubmitTime: job.JobSchedulingInfo().SubmitTime, - ObjectRequirements: []*schedulerobjects.ObjectRequirements{ - { - Requirements: &schedulerobjects.ObjectRequirements_PodRequirements{ - PodRequirements: reqs, - }, - }, - }, - }) + return job } func TestRunningJobDbJob(startTime int64) *jobdb.Job { @@ -885,24 +887,8 @@ func TestNSubmitMsgGang(n int) []*armadaevents.SubmitJob { for i := 0; i < n; i++ { job := Test1CoreSubmitMsg() job.MainObject.ObjectMeta.Annotations = map[string]string{ - configuration.GangIdAnnotation: gangId, - configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n), - configuration.GangMinimumCardinalityAnnotation: fmt.Sprintf("%d", n), - } - gang[i] = job - } - return gang -} - -func TestNSubmitMsgGangLessThanMinCardinality(n int) []*armadaevents.SubmitJob { - gangId := uuid.NewString() - gang := make([]*armadaevents.SubmitJob, n) - for i := 0; i < n; i++ { - job := Test1CoreSubmitMsg() - job.MainObject.ObjectMeta.Annotations = map[string]string{ - configuration.GangIdAnnotation: gangId, - configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n+2), - configuration.GangMinimumCardinalityAnnotation: fmt.Sprintf("%d", n+1), + configuration.GangIdAnnotation: gangId, + configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n), } gang[i] = job } @@ -961,3 +947,16 @@ func (p *MockPassiveClock) Now() time.Time { func (p *MockPassiveClock) Since(time.Time) time.Duration { panic("Not implemented") } + +func MakeTestResourceListFactory() *internaltypes.ResourceListFactory { + result, _ := internaltypes.MakeResourceListFactory(GetTestSupportedResourceTypes()) + return result +} + +func GetTestSupportedResourceTypes() []schedulerconfiguration.ResourceType { + return []schedulerconfiguration.ResourceType{ + {Name: "memory", Resolution: resource.MustParse("1")}, + {Name: "cpu", Resolution: resource.MustParse("1m")}, + {Name: "nvidia.com/gpu", Resolution: resource.MustParse("1m")}, + } +} diff --git a/internal/scheduleringester/dbops.go b/internal/scheduleringester/dbops.go index 47105c0620f..e1a6e8cb407 100644 --- a/internal/scheduleringester/dbops.go +++ b/internal/scheduleringester/dbops.go @@ -42,6 +42,11 @@ type JobSetKey struct { jobSet string } +type JobReprioritiseKey struct { + JobSetKey + Priority int64 +} + type JobRunDetails struct { Queue string DbRun *schedulerdb.Run @@ -120,11 +125,10 @@ type ( InsertRuns map[uuid.UUID]*JobRunDetails UpdateJobSetPriorities map[JobSetKey]int64 MarkJobSetsCancelRequested map[JobSetKey]*JobSetCancelAction - MarkJobsCancelRequested map[string]bool + MarkJobsCancelRequested map[JobSetKey][]string MarkJobsCancelled map[string]time.Time MarkJobsSucceeded map[string]bool MarkJobsFailed map[string]bool - UpdateJobPriorities map[string]int64 UpdateJobSchedulingInfo 
map[string]*JobSchedulingInfoUpdate UpdateJobQueuedState map[string]*JobQueuedStateUpdate MarkRunsSucceeded map[uuid.UUID]time.Time @@ -134,7 +138,12 @@ type ( MarkRunsPending map[uuid.UUID]time.Time MarkRunsPreempted map[uuid.UUID]time.Time InsertJobRunErrors map[uuid.UUID]*schedulerdb.JobRunError - InsertPartitionMarker struct { + UpdateJobPriorities struct { + key JobReprioritiseKey + jobIds []string + } + MarkJobsValidated map[string][]string + InsertPartitionMarker struct { markers []*schedulerdb.Marker } ) @@ -170,7 +179,7 @@ func (a MarkJobSetsCancelRequested) Merge(b DbOperation) bool { } func (a MarkJobsCancelRequested) Merge(b DbOperation) bool { - return mergeInMap(a, b) + return mergeListMaps(a, b) } func (a MarkRunsForJobPreemptRequested) Merge(b DbOperation) bool { @@ -225,8 +234,15 @@ func (a MarkJobsFailed) Merge(b DbOperation) bool { return mergeInMap(a, b) } -func (a UpdateJobPriorities) Merge(b DbOperation) bool { - return mergeInMap(a, b) +func (a *UpdateJobPriorities) Merge(b DbOperation) bool { + switch op := b.(type) { + case *UpdateJobPriorities: + if a.key == op.key { + a.jobIds = append(a.jobIds, op.jobIds...) + return true + } + } + return false } func (a MarkRunsSucceeded) Merge(b DbOperation) bool { @@ -253,6 +269,10 @@ func (a InsertJobRunErrors) Merge(b DbOperation) bool { return mergeInMap(a, b) } +func (a MarkJobsValidated) Merge(b DbOperation) bool { + return mergeInMap(a, b) +} + func (a *InsertPartitionMarker) Merge(b DbOperation) bool { switch op := b.(type) { case *InsertPartitionMarker: @@ -332,7 +352,7 @@ func (a InsertRuns) CanBeAppliedBefore(b DbOperation) bool { } func (a UpdateJobSetPriorities) CanBeAppliedBefore(b DbOperation) bool { - _, isUpdateJobPriorities := b.(UpdateJobPriorities) + _, isUpdateJobPriorities := b.(*UpdateJobPriorities) return !isUpdateJobPriorities && !definesJobInSet(a, b) } @@ -341,7 +361,7 @@ func (a MarkJobSetsCancelRequested) CanBeAppliedBefore(b DbOperation) bool { } func (a MarkJobsCancelRequested) CanBeAppliedBefore(b DbOperation) bool { - return !definesJob(a, b) && !definesRunForJob(a, b) + return !definesJobInSet(a, b) && !definesRunInSet(a, b) } func (a MarkRunsForJobPreemptRequested) CanBeAppliedBefore(b DbOperation) bool { @@ -368,9 +388,12 @@ func (a UpdateJobQueuedState) CanBeAppliedBefore(b DbOperation) bool { return !definesJob(a, b) } -func (a UpdateJobPriorities) CanBeAppliedBefore(b DbOperation) bool { +func (a *UpdateJobPriorities) CanBeAppliedBefore(b DbOperation) bool { _, isUpdateJobSetPriorities := b.(UpdateJobSetPriorities) - return !isUpdateJobSetPriorities && !definesJob(a, b) + _, isUpdateJobPriorities := b.(*UpdateJobPriorities) + return !isUpdateJobPriorities && !isUpdateJobSetPriorities && + !definesJobInSet(map[JobSetKey]bool{a.key.JobSetKey: true}, b) && + !definesRunInSet(map[JobSetKey]bool{a.key.JobSetKey: true}, b) } func (a MarkRunsSucceeded) CanBeAppliedBefore(b DbOperation) bool { @@ -404,6 +427,10 @@ func (a InsertJobRunErrors) CanBeAppliedBefore(_ DbOperation) bool { return true } +func (a MarkJobsValidated) CanBeAppliedBefore(b DbOperation) bool { + return !definesJob(a, b) +} + // definesJobInSet returns true if b is an InsertJobs operation // that inserts at least one job in any of the job sets that make // up the keys of a. @@ -455,16 +482,3 @@ func definesRun[M ~map[uuid.UUID]V, V any](a M, b DbOperation) bool { } return false } - -// definesRunForJob returns true if b is an InsertRuns operation -// that inserts at least one run with job id equal to any of the keys of a. 
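UpdateJobPriorities is no longer a plain map keyed by job id; it now carries a (queue, job set, priority) key plus a list of job ids, and two operations merge only when those keys are equal. A self-contained sketch of that merge rule, using simplified stand-ins for the scheduleringester types (the real Merge accepts a DbOperation and type-switches on it):

```go
package main

import "fmt"

// Simplified stand-ins for the scheduleringester types, to illustrate the
// merge rule: ops merge only when queue, job set and priority all match.
type JobSetKey struct {
	queue  string
	jobSet string
}

type JobReprioritiseKey struct {
	JobSetKey
	Priority int64
}

type UpdateJobPriorities struct {
	key    JobReprioritiseKey
	jobIds []string
}

// Merge appends b's job ids onto a when the keys are identical; otherwise the
// two operations stay separate.
func (a *UpdateJobPriorities) Merge(b *UpdateJobPriorities) bool {
	if a.key == b.key {
		a.jobIds = append(a.jobIds, b.jobIds...)
		return true
	}
	return false
}

func main() {
	a := &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{"queue-a", "set1"}, 3}, []string{"job-1"}}
	b := &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{"queue-a", "set1"}, 3}, []string{"job-2"}}
	c := &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{"queue-a", "set1"}, 5}, []string{"job-3"}}
	fmt.Println(a.Merge(b), a.jobIds) // true [job-1 job-2]
	fmt.Println(a.Merge(c))           // false: same job set, different priority
}
```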
-func definesRunForJob[M ~map[string]V, V any](a M, b DbOperation) bool { - if op, ok := b.(InsertRuns); ok { - for _, run := range op { - if _, ok := a[run.DbRun.JobID]; ok { - return true - } - } - } - return false -} diff --git a/internal/scheduleringester/dbops_test.go b/internal/scheduleringester/dbops_test.go index 552f24a20d4..a3228674399 100644 --- a/internal/scheduleringester/dbops_test.go +++ b/internal/scheduleringester/dbops_test.go @@ -19,17 +19,17 @@ func TestMerge(t *testing.T) { jobId1 := util.NewULID() jobId2 := util.NewULID() jobId3 := util.NewULID() - markJobsCancelled1 := MarkJobsCancelRequested{jobId1: false, jobId2: false} - markJobsCancelled2 := MarkJobsCancelRequested{jobId2: true, jobId3: true} - ok := markJobsCancelled1.Merge(markJobsCancelled2) + markJobsFailed1 := MarkJobsFailed{jobId1: false, jobId2: false} + markJobsFailed2 := MarkJobsFailed{jobId2: true, jobId3: true} + ok := markJobsFailed1.Merge(markJobsFailed2) assert.True(t, ok) - assert.Equal(t, MarkJobsCancelRequested{jobId1: false, jobId2: true, jobId3: true}, markJobsCancelled1) + assert.Equal(t, MarkJobsFailed{jobId1: false, jobId2: true, jobId3: true}, markJobsFailed1) jobId4 := util.NewULID() markJobsSucceeded1 := MarkJobsSucceeded{jobId1: true, jobId4: true} - ok = markJobsCancelled1.Merge(markJobsSucceeded1) + ok = markJobsFailed1.Merge(markJobsSucceeded1) assert.False(t, ok) - assert.Equal(t, MarkJobsCancelRequested{jobId1: false, jobId2: true, jobId3: true}, markJobsCancelled1) + assert.Equal(t, MarkJobsFailed{jobId1: false, jobId2: true, jobId3: true}, markJobsFailed1) } func TestMerge_MarkRunsForJobPreemptRequested(t *testing.T) { @@ -61,6 +61,16 @@ func TestMerge_MarkRunsForJobPreemptRequested(t *testing.T) { markPreemptRequested1) } +func TestMerge_UpdateJobPriorities(t *testing.T) { + jobId1 := util.NewULID() + jobId2 := util.NewULID() + updateJobPriorities := &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 1}, []string{jobId1}} + updateJobPriorities2 := &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 1}, []string{jobId2}} + ok := updateJobPriorities.Merge(updateJobPriorities2) + assert.True(t, ok) + assert.Equal(t, updateJobPriorities, &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 1}, []string{jobId1, jobId2}}) +} + func TestMerge_UpdateJobSchedulingInfo(t *testing.T) { jobId1 := util.NewULID() jobId2 := util.NewULID() @@ -159,15 +169,15 @@ func TestDbOperationOptimisation(t *testing.T) { UpdateJobSetPriorities{JobSetKey{queue: testQueueName, jobSet: "set2"}: 2}, // 3 InsertJobs{jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], Queue: testQueueName, JobSet: "set1"}}, // 3 }}, - "UpdateJobSetPriorities, UpdateJobPriorities": {N: 4, Ops: []DbOperation{ - InsertJobs{jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}}, // 1 - InsertJobs{jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], Queue: testQueueName, JobSet: "set1"}}, // 1 - UpdateJobPriorities{jobIds[0]: 1}, // 2 - UpdateJobSetPriorities{JobSetKey{queue: testQueueName, jobSet: "set1"}: 2}, // 3 - UpdateJobPriorities{jobIds[1]: 3}, // 4 - InsertJobs{jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], Queue: testQueueName, JobSet: "set2"}}, // 4 - UpdateJobPriorities{jobIds[1]: 4}, // 4 - UpdateJobPriorities{jobIds[2]: 5}, // 4 + "UpdateJobSetPriorities, UpdateJobPriorities": {N: 5, Ops: []DbOperation{ + InsertJobs{jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: 
"set1"}}, // 1 + InsertJobs{jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], Queue: testQueueName, JobSet: "set1"}}, // 1 + &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 1}, []string{jobIds[0]}}, // 2 // 2 + UpdateJobSetPriorities{JobSetKey{queue: testQueueName, jobSet: "set1"}: 2}, // 3 + &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 3}, []string{jobIds[1]}}, // 4 + InsertJobs{jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], Queue: testQueueName, JobSet: "set2"}}, // 1 + &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 4}, []string{jobIds[1]}}, // 5 + &UpdateJobPriorities{JobReprioritiseKey{JobSetKey{queue: testQueueName, jobSet: "set1"}, 4}, []string{jobIds[2]}}, // 5 }}, "MarkJobSetsCancelRequested": {N: 3, Ops: []DbOperation{ InsertJobs{jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}}, // 1 @@ -179,11 +189,11 @@ func TestDbOperationOptimisation(t *testing.T) { "MarkJobSetsCancelRequested, MarkJobsCancelRequested": {N: 4, Ops: []DbOperation{ InsertJobs{jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}}, // 1 InsertJobs{jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], Queue: testQueueName, JobSet: "set1"}}, // 1 - MarkJobsCancelRequested{jobIds[0]: true}, // 2 + MarkJobsCancelRequested{JobSetKey{queue: testQueueName, jobSet: "set1"}: []string{jobIds[0]}}, // 2 // 2 MarkJobSetsCancelRequested{JobSetKey{queue: testQueueName, jobSet: "set1"}: &JobSetCancelAction{cancelQueued: true, cancelLeased: true}}, // 3 InsertJobs{jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], Queue: testQueueName, JobSet: "set1"}}, // 4 - MarkJobsCancelRequested{jobIds[1]: true}, // 4 - MarkJobsCancelRequested{jobIds[2]: true}, // 4 + MarkJobsCancelRequested{JobSetKey{queue: testQueueName, jobSet: "set1"}: []string{jobIds[1]}}, // 4 // 4 + MarkJobsCancelRequested{JobSetKey{queue: testQueueName, jobSet: "set1"}: []string{jobIds[2]}}, // 4 // 4 }}, "MarkRunsForJobPreemptRequested": {N: 2, Ops: []DbOperation{ InsertJobs{jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}}, // 1 @@ -421,11 +431,15 @@ func (db *mockDb) apply(op DbOperation) error { } } case MarkJobsCancelRequested: - for jobId := range o { - if job, ok := db.Jobs[jobId]; ok { - job.CancelRequested = true - } else { - return errors.Errorf("job %s not in db", jobId) + for jobSetKey, jobIds := range o { + for _, jobId := range jobIds { + job, ok := db.Jobs[jobId] + if !ok { + return errors.Errorf("job %s not in db", jobId) + } + if job.JobSet == jobSetKey.jobSet && job.Queue == jobSetKey.queue { + job.CancelRequested = true + } } } case MarkJobsSucceeded: @@ -444,13 +458,15 @@ func (db *mockDb) apply(op DbOperation) error { return errors.Errorf("job %s not in db", jobId) } } - case UpdateJobPriorities: - for jobId, priority := range o { - if job, ok := db.Jobs[jobId]; ok { - job.Priority = priority - } else { + case *UpdateJobPriorities: + for _, jobId := range o.jobIds { + job, ok := db.Jobs[jobId] + if !ok { return errors.Errorf("job %s not in db", jobId) } + if job.JobSet == o.key.jobSet && job.Queue == o.key.queue { + job.Priority = o.key.Priority + } } case MarkRunsSucceeded: for runId := range o { diff --git a/internal/scheduleringester/instructions.go b/internal/scheduleringester/instructions.go index e22dcac51b4..9e0468540d8 100644 --- a/internal/scheduleringester/instructions.go +++ b/internal/scheduleringester/instructions.go @@ -96,11 
+96,11 @@ func (c *InstructionConverter) dbOperationsFromEventSequence(es *armadaevents.Ev case *armadaevents.EventSequence_Event_JobPreemptionRequested: operationsFromEvent, err = c.handleJobPreemptionRequested(event.GetJobPreemptionRequested(), meta) case *armadaevents.EventSequence_Event_ReprioritiseJob: - operationsFromEvent, err = c.handleReprioritiseJob(event.GetReprioritiseJob()) + operationsFromEvent, err = c.handleReprioritiseJob(event.GetReprioritiseJob(), meta) case *armadaevents.EventSequence_Event_ReprioritiseJobSet: operationsFromEvent, err = c.handleReprioritiseJobSet(event.GetReprioritiseJobSet(), meta) case *armadaevents.EventSequence_Event_CancelJob: - operationsFromEvent, err = c.handleCancelJob(event.GetCancelJob()) + operationsFromEvent, err = c.handleCancelJob(event.GetCancelJob(), meta) case *armadaevents.EventSequence_Event_CancelJobSet: operationsFromEvent, err = c.handleCancelJobSet(event.GetCancelJobSet(), meta) case *armadaevents.EventSequence_Event_CancelledJob: @@ -113,8 +113,9 @@ func (c *InstructionConverter) dbOperationsFromEventSequence(es *armadaevents.Ev operationsFromEvent, err = c.handleJobRunPreempted(event.GetJobRunPreempted(), eventTime) case *armadaevents.EventSequence_Event_JobRunAssigned: operationsFromEvent, err = c.handleJobRunAssigned(event.GetJobRunAssigned(), eventTime) + case *armadaevents.EventSequence_Event_JobValidated: + operationsFromEvent, err = c.handleJobValidated(event.GetJobValidated()) case *armadaevents.EventSequence_Event_ReprioritisedJob, - *armadaevents.EventSequence_Event_JobDuplicateDetected, *armadaevents.EventSequence_Event_ResourceUtilisation, *armadaevents.EventSequence_Event_StandaloneIngressInfo: // These events can all be safely ignored @@ -337,13 +338,20 @@ func (c *InstructionConverter) handleJobPreemptionRequested(preemptionRequested }}, nil } -func (c *InstructionConverter) handleReprioritiseJob(reprioritiseJob *armadaevents.ReprioritiseJob) ([]DbOperation, error) { +func (c *InstructionConverter) handleReprioritiseJob(reprioritiseJob *armadaevents.ReprioritiseJob, meta eventSequenceCommon) ([]DbOperation, error) { jobId, err := armadaevents.UlidStringFromProtoUuid(reprioritiseJob.GetJobId()) if err != nil { return nil, err } - return []DbOperation{UpdateJobPriorities{ - jobId: int64(reprioritiseJob.Priority), + return []DbOperation{&UpdateJobPriorities{ + key: JobReprioritiseKey{ + JobSetKey: JobSetKey{ + queue: meta.queue, + jobSet: meta.jobset, + }, + Priority: int64(reprioritiseJob.Priority), + }, + jobIds: []string{jobId}, }}, nil } @@ -353,13 +361,16 @@ func (c *InstructionConverter) handleReprioritiseJobSet(reprioritiseJobSet *arma }}, nil } -func (c *InstructionConverter) handleCancelJob(cancelJob *armadaevents.CancelJob) ([]DbOperation, error) { +func (c *InstructionConverter) handleCancelJob(cancelJob *armadaevents.CancelJob, meta eventSequenceCommon) ([]DbOperation, error) { jobId, err := armadaevents.UlidStringFromProtoUuid(cancelJob.GetJobId()) if err != nil { return nil, err } return []DbOperation{MarkJobsCancelRequested{ - jobId: true, + JobSetKey{ + queue: meta.queue, + jobSet: meta.jobset, + }: []string{jobId}, }}, nil } @@ -400,6 +411,16 @@ func (c *InstructionConverter) handlePartitionMarker(pm *armadaevents.PartitionM }}, nil } +func (c *InstructionConverter) handleJobValidated(checked *armadaevents.JobValidated) ([]DbOperation, error) { + jobId, err := armadaevents.UlidStringFromProtoUuid(checked.GetJobId()) + if err != nil { + return nil, err + } + return []DbOperation{ + 
MarkJobsValidated{jobId: checked.Pools}, + }, nil +} + // schedulingInfoFromSubmitJob returns a minimal representation of a job containing only the info needed by the scheduler. func (c *InstructionConverter) schedulingInfoFromSubmitJob(submitJob *armadaevents.SubmitJob, submitTime time.Time) (*schedulerobjects.JobSchedulingInfo, error) { return SchedulingInfoFromSubmitJob(submitJob, submitTime, c.priorityClasses) @@ -416,7 +437,6 @@ func SchedulingInfoFromSubmitJob(submitJob *armadaevents.SubmitJob, submitTime t SubmitTime: submitTime, Priority: submitJob.Priority, Version: 0, - QueueTtlSeconds: submitJob.QueueTtlSeconds, } // Scheduling requirements specific to the objects that make up this job. diff --git a/internal/scheduleringester/instructions_test.go b/internal/scheduleringester/instructions_test.go index 848c6d9196d..ce079471b1a 100644 --- a/internal/scheduleringester/instructions_test.go +++ b/internal/scheduleringester/instructions_test.go @@ -89,7 +89,7 @@ func TestConvertSequence(t *testing.T) { expected: []DbOperation{MarkRunsForJobPreemptRequested{JobSetKey{queue: f.Queue, jobSet: f.JobSetName}: []string{f.JobIdString}}}, }, "job run preempted": { - events: []*armadaevents.EventSequence_Event{f.JobPreempted}, + events: []*armadaevents.EventSequence_Event{f.JobRunPreempted}, expected: []DbOperation{MarkRunsPreempted{f.RunIdUuid: f.BaseTime}}, }, "lease returned": { @@ -129,7 +129,13 @@ func TestConvertSequence(t *testing.T) { "reprioritise job": { events: []*armadaevents.EventSequence_Event{f.JobReprioritiseRequested}, expected: []DbOperation{ - UpdateJobPriorities{f.JobIdString: f.NewPriority}, + &UpdateJobPriorities{ + key: JobReprioritiseKey{ + JobSetKey: JobSetKey{queue: f.Queue, jobSet: f.JobSetName}, + Priority: f.NewPriority, + }, + jobIds: []string{f.JobIdString}, + }, }, }, "reprioritise jobset": { @@ -141,7 +147,7 @@ func TestConvertSequence(t *testing.T) { "JobCancelRequested": { events: []*armadaevents.EventSequence_Event{f.JobCancelRequested}, expected: []DbOperation{ - MarkJobsCancelRequested{f.JobIdString: true}, + MarkJobsCancelRequested{JobSetKey{queue: f.Queue, jobSet: f.JobSetName}: {f.JobIdString}}, }, }, "JobSetCancelRequested": { @@ -187,6 +193,12 @@ func TestConvertSequence(t *testing.T) { }}, }, }, + "SubmitChecked": { + events: []*armadaevents.EventSequence_Event{f.JobValidated}, + expected: []DbOperation{ + MarkJobsValidated{f.JobIdString: []string{"cpu"}}, + }, + }, "PositionMarker": { events: []*armadaevents.EventSequence_Event{f.PartitionMarker}, expected: []DbOperation{ diff --git a/internal/scheduleringester/schedulerdb.go b/internal/scheduleringester/schedulerdb.go index 5deb71c8da0..ef74b5ac4f7 100644 --- a/internal/scheduleringester/schedulerdb.go +++ b/internal/scheduleringester/schedulerdb.go @@ -14,6 +14,7 @@ import ( "github.com/armadaproject/armada/internal/common/database" "github.com/armadaproject/armada/internal/common/ingest" "github.com/armadaproject/armada/internal/common/ingest/metrics" + "github.com/armadaproject/armada/internal/common/slices" schedulerdb "github.com/armadaproject/armada/internal/scheduler/database" ) @@ -174,10 +175,16 @@ func (s *SchedulerDb) WriteDbOp(ctx *armadacontext.Context, tx pgx.Tx, op DbOper } } case MarkJobsCancelRequested: - jobIds := maps.Keys(o) - err := queries.MarkJobsCancelRequestedById(ctx, jobIds) - if err != nil { - return errors.WithStack(err) + for key, value := range o { + params := schedulerdb.MarkJobsCancelRequestedByIdParams{ + Queue: key.queue, + JobSet: key.jobSet, + JobIds: value, + } 
+ err := queries.MarkJobsCancelRequestedById(ctx, params) + if err != nil { + return errors.WithStack(err) + } } case MarkJobsCancelled: jobIds := maps.Keys(o) @@ -219,17 +226,15 @@ func (s *SchedulerDb) WriteDbOp(ctx *armadacontext.Context, tx pgx.Tx, op DbOper if err != nil { return errors.WithStack(err) } - case UpdateJobPriorities: - // TODO: This will be slow if there's a large number of ids. - // Could be addressed by using a separate table for priority + upsert. - for jobId, priority := range o { - err := queries.UpdateJobPriorityById(ctx, schedulerdb.UpdateJobPriorityByIdParams{ - JobID: jobId, - Priority: priority, - }) - if err != nil { - return errors.WithStack(err) - } + case *UpdateJobPriorities: + err := queries.UpdateJobPriorityById(ctx, schedulerdb.UpdateJobPriorityByIdParams{ + Queue: o.key.queue, + JobSet: o.key.jobSet, + Priority: o.key.Priority, + JobIds: slices.Unique(o.jobIds), + }) + if err != nil { + return errors.WithStack(err) } case MarkRunsSucceeded: successTimes := make([]interface{}, 0, len(o)) @@ -334,6 +339,16 @@ func (s *SchedulerDb) WriteDbOp(ctx *armadacontext.Context, tx pgx.Tx, op DbOper } } return nil + case MarkJobsValidated: + markValidatedSqlStatement := `UPDATE jobs SET validated = true, pools = $1 WHERE job_id = $2` + batch := &pgx.Batch{} + for key, value := range o { + batch.Queue(markValidatedSqlStatement, value, key) + } + err := execBatch(ctx, tx, batch) + if err != nil { + return errors.WithStack(err) + } default: return errors.Errorf("received unexpected op %+v", op) } diff --git a/internal/scheduleringester/schedulerdb_test.go b/internal/scheduleringester/schedulerdb_test.go index c3c50af5b9a..9ad4ea7e2f5 100644 --- a/internal/scheduleringester/schedulerdb_test.go +++ b/internal/scheduleringester/schedulerdb_test.go @@ -32,7 +32,8 @@ func TestWriteOps(t *testing.T) { } scheduledAtPriorities := []int32{5, 10} tests := map[string]struct { - Ops []DbOperation + Ops []DbOperation + ExpectNoUpdates bool }{ "InsertJobs": {Ops: []DbOperation{ InsertJobs{ @@ -44,6 +45,16 @@ func TestWriteOps(t *testing.T) { jobIds[3]: &schedulerdb.Job{JobID: jobIds[3], JobSet: "set2"}, }, }}, + "Submit Check": {Ops: []DbOperation{ + InsertJobs{ + jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], JobSet: "set1"}, + jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], JobSet: "set2"}, + }, + MarkJobsValidated{ + jobIds[0]: []string{"cpu"}, + jobIds[1]: []string{"gpu", "cpu"}, + }, + }}, "InsertRuns": {Ops: []DbOperation{ InsertJobs{ jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}, @@ -78,14 +89,20 @@ func TestWriteOps(t *testing.T) { }}, "UpdateJobPriorities": {Ops: []DbOperation{ InsertJobs{ - jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], JobSet: "set1"}, - jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], JobSet: "set2"}, - jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], JobSet: "set1"}, - jobIds[3]: &schedulerdb.Job{JobID: jobIds[3], JobSet: "set2"}, + jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}, + jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], Queue: testQueueName, JobSet: "set2"}, + jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], Queue: testQueueName, JobSet: "set1"}, + jobIds[3]: &schedulerdb.Job{JobID: jobIds[3], Queue: testQueueName, JobSet: "set2"}, }, - UpdateJobPriorities{ - jobIds[0]: 1, - jobIds[1]: 3, + &UpdateJobPriorities{ + key: JobReprioritiseKey{ + JobSetKey: JobSetKey{ + queue: testQueueName, + jobSet: "set1", + }, + Priority: 3, + }, + jobIds: []string{jobIds[0], jobIds[2]}, }, }}, 
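For reference, the MarkJobsValidated write path above queues one UPDATE per job into a single pgx batch rather than issuing the statements one by one. A rough, self-contained sketch of that pattern, assuming pgx v5 (the repository may pin a different major version, and the real code goes through an execBatch helper):

```go
package sketch

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// markJobsValidated applies the same UPDATE used in WriteDbOp for
// MarkJobsValidated, batching one statement per job id so the round trips
// to Postgres are amortised. poolsByJobId maps job id -> validated pools.
func markJobsValidated(ctx context.Context, tx pgx.Tx, poolsByJobId map[string][]string) error {
	const stmt = `UPDATE jobs SET validated = true, pools = $1 WHERE job_id = $2`
	batch := &pgx.Batch{}
	for jobId, pools := range poolsByJobId {
		batch.Queue(stmt, pools, jobId)
	}
	results := tx.SendBatch(ctx, batch)
	defer results.Close()
	// One Exec per queued statement; any failure aborts the whole batch.
	for range poolsByJobId {
		if _, err := results.Exec(); err != nil {
			return err
		}
	}
	return nil
}
```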
"MarkRunsForJobPreemptRequested": {Ops: []DbOperation{ @@ -135,14 +152,14 @@ func TestWriteOps(t *testing.T) { }}, "MarkJobsCancelRequested": {Ops: []DbOperation{ InsertJobs{ - jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], JobSet: "set1"}, - jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], JobSet: "set2"}, - jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], JobSet: "set1"}, - jobIds[3]: &schedulerdb.Job{JobID: jobIds[3], JobSet: "set2"}, + jobIds[0]: &schedulerdb.Job{JobID: jobIds[0], Queue: testQueueName, JobSet: "set1"}, + jobIds[1]: &schedulerdb.Job{JobID: jobIds[1], Queue: testQueueName, JobSet: "set2"}, + jobIds[2]: &schedulerdb.Job{JobID: jobIds[2], Queue: testQueueName, JobSet: "set1"}, + jobIds[3]: &schedulerdb.Job{JobID: jobIds[3], Queue: testQueueName, JobSet: "set2"}, }, MarkJobsCancelRequested{ - jobIds[0]: true, - jobIds[1]: true, + JobSetKey{queue: testQueueName, jobSet: "set1"}: []string{jobIds[0]}, + JobSetKey{queue: testQueueName, jobSet: "set2"}: []string{jobIds[1]}, }, }}, "MarkJobsCancelled": {Ops: []DbOperation{ @@ -350,7 +367,7 @@ func addDefaultValues(op DbOperation) DbOperation { case MarkJobsCancelRequested: case MarkJobsSucceeded: case MarkJobsFailed: - case UpdateJobPriorities: + case *UpdateJobPriorities: case MarkRunsSucceeded: case MarkRunsFailed: case MarkRunsRunning: @@ -490,7 +507,7 @@ func assertOpSuccess(t *testing.T, schedulerDb *SchedulerDb, serials map[string] numChanged := 0 jobIds := make([]string, 0) for _, job := range jobs { - if _, ok := expected[job.JobID]; ok { + if _, ok := expected[JobSetKey{queue: job.Queue, jobSet: job.JobSet}]; ok { assert.True(t, job.CancelRequested) numChanged++ jobIds = append(jobIds, job.JobID) @@ -581,19 +598,20 @@ func assertOpSuccess(t *testing.T, schedulerDb *SchedulerDb, serials map[string] } } assert.Equal(t, len(expected), numChanged) - case UpdateJobPriorities: + case *UpdateJobPriorities: jobs, err := selectNewJobs(ctx, serials["jobs"]) if err != nil { return errors.WithStack(err) } numChanged := 0 for _, job := range jobs { - if priority, ok := expected[job.JobID]; ok { - assert.Equal(t, priority, job.Priority) + jobSetKey := JobSetKey{queue: job.Queue, jobSet: job.JobSet} + if expected.key.JobSetKey == jobSetKey { + assert.Equal(t, expected.key.Priority, job.Priority) numChanged++ } } - assert.Equal(t, len(expected), numChanged) + assert.Equal(t, len(expected.jobIds), numChanged) case MarkRunsSucceeded: jobs, err := selectNewJobs(ctx, 0) if err != nil { @@ -751,6 +769,21 @@ func assertOpSuccess(t *testing.T, schedulerDb *SchedulerDb, serials map[string] assert.Equal(t, expectedMarker.PartitionID, actualMarker.PartitionID) assert.Equal(t, expectedMarker.Created, actualMarker.Created) } + case MarkJobsValidated: + jobs, err := selectNewJobs(ctx, serials["jobs"]) + if err != nil { + return errors.WithStack(err) + } + numChanged := 0 + jobIds := make([]string, 0) + for _, job := range jobs { + if _, ok := expected[job.JobID]; ok { + assert.True(t, job.Validated) + numChanged++ + jobIds = append(jobIds, job.JobID) + } + } + assert.Equal(t, len(expected), numChanged) default: return errors.Errorf("received unexpected op %+v", op) } diff --git a/internal/testsuite/eventwatcher/eventwatcher.go b/internal/testsuite/eventwatcher/eventwatcher.go index 0c83bf5addd..51d619d4050 100644 --- a/internal/testsuite/eventwatcher/eventwatcher.go +++ b/internal/testsuite/eventwatcher/eventwatcher.go @@ -231,8 +231,6 @@ func isTerminalEvent(msg *api.EventMessage) bool { return true case *api.EventMessage_Cancelled: return true - case 
*api.EventMessage_DuplicateFound: - return true } return false } diff --git a/magefiles/airflow.go b/magefiles/airflow.go index c7b51503572..8a1d970b5d7 100644 --- a/magefiles/airflow.go +++ b/magefiles/airflow.go @@ -85,7 +85,12 @@ func AirflowOperator() error { return fmt.Errorf("failed to create proto-airflow directory: %w", err) } - err = dockerRun("buildx", "build", "-o", "type=docker", "-t", "armada-airflow-operator-builder", "-f", "./build/airflow-operator/Dockerfile", ".") + buildConfig, err := getBuildConfig() + if err != nil { + return err + } + err = dockerBuildImage(NewDockerBuildConfig(buildConfig.PythonBuilderBaseImage), "armada-airflow-operator-builder", "./build/airflow-operator/Dockerfile") + if err != nil { return fmt.Errorf("failed to build Airflow Operator: %w", err) } diff --git a/magefiles/ci.go b/magefiles/ci.go index edbe9c849d8..93369741ffa 100644 --- a/magefiles/ci.go +++ b/magefiles/ci.go @@ -12,8 +12,7 @@ import ( ) func createQueue() error { - outbytes, err := exec.Command(armadaCtl(), "create", "queue", "e2e-test-queue").CombinedOutput() - out := string(outbytes) + out, err := runArmadaCtl("create", "queue", "e2e-test-queue") // check if err text contains "already exists" and ignore if it does if err != nil && !strings.Contains(out, "already exists") { fmt.Println(out) @@ -65,8 +64,7 @@ func CheckForArmadaRunning() error { case <-timeout: return fmt.Errorf("timed out waiting for Armada to start") case <-tick: - outbytes, _ := exec.Command(armadaCtl(), "submit", "./developer/config/job.yaml").CombinedOutput() - out := string(outbytes) + out, _ := runArmadaCtl("submit", "./developer/config/job.yaml") if strings.Contains(out, "Submitted job with id") { // Sleep for 1 second to allow Armada to fully start time.Sleep(1 * time.Second) @@ -78,8 +76,24 @@ func CheckForArmadaRunning() error { } } -func armadaCtl() string { +func runArmadaCtl(args ...string) (string, error) { + armadaCtlArgs := []string{ + "--config", "e2e/config/armadactl_config.yaml", + } + armadaCtlArgs = append(armadaCtlArgs, args...) + outBytes, err := exec.Command(findOrDownloadArmadaCtl(), armadaCtlArgs...).CombinedOutput() + out := string(outBytes) + return out, err +} + +// Finds armadactl to submit with +// We look for local armadactl first, then in path, then try to download one from github releases. 
+func findOrDownloadArmadaCtl() string { if _, err := os.Stat("./armadactl"); os.IsNotExist(err) { + if path, err := exec.LookPath("armadactl"); err == nil { + return path + } + err = sh.Run("sh", "./docs/local/armadactl.sh") if err != nil { return "" diff --git a/magefiles/cmd.go b/magefiles/cmd.go index b57ec6c60c3..3531d35a9b0 100644 --- a/magefiles/cmd.go +++ b/magefiles/cmd.go @@ -122,7 +122,7 @@ func go_CMD() ([]string, error) { "GARCH=amd64", "-v", fmt.Sprintf("%s:/go", DOCKER_GOPATH_DIR), - "golang:1.20.2-buster", + "golang:1.21-bookworm", }, nil } diff --git a/magefiles/config.go b/magefiles/config.go new file mode 100644 index 00000000000..7f6ca05b0f2 --- /dev/null +++ b/magefiles/config.go @@ -0,0 +1,32 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" +) + +type BuildConfig struct { + DockerRegistries map[string]string `json:"dockerRegistries"` + PythonBuilderBaseImage string `json:"pythonBuilderBaseImage"` +} + +func getBuildConfig() (BuildConfig, error) { + configPath := os.Getenv("ARMADA_BUILD_CONFIG") + if configPath == "" { + return BuildConfig{}, nil + } + + content, err := os.ReadFile(configPath) + if err != nil { + return BuildConfig{}, fmt.Errorf("error reading file: %w", err) + } + + var config BuildConfig + err = json.Unmarshal(content, &config) + if err != nil { + return BuildConfig{}, fmt.Errorf("error parsing JSON: %w", err) + } + + return config, nil +} diff --git a/magefiles/developer.go b/magefiles/developer.go index 6a9cf9f584c..9cb4876f8d5 100644 --- a/magefiles/developer.go +++ b/magefiles/developer.go @@ -49,12 +49,18 @@ func getComponentsList() []string { return strings.Split(os.Getenv("ARMADA_COMPONENTS"), ",") } -// Dependencies include pulsar, postgres (v1 and v2) as well as redis. -func StartDependencies() error { - if onArm() { - os.Setenv("PULSAR_IMAGE", "richgross/pulsar:2.11.0") +// Runs scheduler and lookout migrations +func RunMigrations() error { + migrations := []string{ + "scheduler-migration", + "lookoutv2-migration", } + command := append([]string{"compose", "up", "-d"}, migrations...) + return dockerRun(command...) +} +// Starts armada infrastructure dependencies +func StartDependencies() error { command := append([]string{"compose", "up", "-d"}, dependencies...) return dockerRun(command...) } diff --git a/magefiles/docker.go b/magefiles/docker.go index 47b99246aa5..dbd27b62be6 100644 --- a/magefiles/docker.go +++ b/magefiles/docker.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "strings" semver "github.com/Masterminds/semver/v3" @@ -26,6 +27,38 @@ func dockerRun(args ...string) error { return sh.Run(dockerBinary(), args...) } +type DockerBuildConfig struct { + Platform string + BaseImageOverride string +} + +func NewDockerBuildConfig(baseImageOverride string) DockerBuildConfig { + config := DockerBuildConfig{} + config.BaseImageOverride = baseImageOverride + config.Platform = "x86_64" + return config +} + +func dockerBuildImage(config DockerBuildConfig, imageTag string, dockerFile string) error { + dockerArgs := []string{ + "buildx", "build", "-o", "type=docker", "-t", imageTag, "-f", dockerFile, + } + if config.BaseImageOverride != "" { + dockerArgs = append(dockerArgs, "--build-arg", fmt.Sprintf("BASE_IMAGE=%s", config.BaseImageOverride)) + } + if config.Platform != "" { + dockerArgs = append(dockerArgs, "--build-arg", fmt.Sprintf("PLATFORM=%s", config.Platform)) + } + + dockerArgs = append(dockerArgs, ".") + err := dockerRun(dockerArgs...) 
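getBuildConfig reads an optional JSON file pointed to by ARMADA_BUILD_CONFIG, carrying a dockerRegistries map and a pythonBuilderBaseImage override. A small, runnable sketch of the expected shape (the mirror hostname and base image below are made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the BuildConfig struct added in magefiles/config.go.
type BuildConfig struct {
	DockerRegistries       map[string]string `json:"dockerRegistries"`
	PythonBuilderBaseImage string            `json:"pythonBuilderBaseImage"`
}

func main() {
	// Hypothetical contents of the file referenced by ARMADA_BUILD_CONFIG;
	// the registry mapping and base image are illustrative only.
	raw := []byte(`{
		"dockerRegistries": {
			"registry.k8s.io": "mirror.example.com/registry.k8s.io"
		},
		"pythonBuilderBaseImage": "mirror.example.com/library/python:3.10-slim"
	}`)

	var cfg BuildConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```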
+ if err != nil { + return fmt.Errorf("failed to build %s from %s: %w", imageTag, dockerFile, err) + } + + return nil +} + func dockerBuildxVersion() (*semver.Version, error) { output, err := dockerOutput("buildx", "version") if err != nil { diff --git a/magefiles/dotnet.go b/magefiles/dotnet.go index 493a53043a0..d73dde14abf 100644 --- a/magefiles/dotnet.go +++ b/magefiles/dotnet.go @@ -78,19 +78,12 @@ func Dotnet() error { dotnetCmd := dotnetCmd() - client := append(dotnetCmd, "dotnet", "build", "./client/DotNet/Armada.Client", "/t:NSwag") + client := append(dotnetCmd, "dotnet", "build", "./client/DotNet/ArmadaProject.Io.Client") output, err := dockerOutput(client...) fmt.Println(output) if err != nil { return err } - - client = append(dotnetCmd, "dotnet", "build", "./client/DotNet/ArmadaProject.Io.Client") - output, err = dockerOutput(client...) - fmt.Println(output) - if err != nil { - return err - } return nil } @@ -100,15 +93,9 @@ func PackNuget() error { fmt.Println("Pack Nuget...") dotnetCmd := dotnetCmd() - build := append(dotnetCmd, "dotnet", "pack", "client/DotNet/Armada.Client/Armada.Client.csproj", "-c", "Release", "-p:PackageVersion="+releaseTag, "-o", "./bin/client/DotNet") - output, err := dockerOutput(build...) - fmt.Println(output) - if err != nil { - return err - } - build = append(dotnetCmd, "dotnet", "pack", "client/DotNet/ArmadaProject.Io.Client/ArmadaProject.Io.Client.csproj", "-c", "Release", "-p:PackageVersion="+releaseTag, "-o", "./bin/client/DotNet") - output, err = dockerOutput(build...) + build := append(dotnetCmd, "dotnet", "pack", "client/DotNet/ArmadaProject.Io.Client/ArmadaProject.Io.Client.csproj", "-c", "Release", "-p:PackageVersion="+releaseTag, "-o", "./bin/client/DotNet") + output, err := dockerOutput(build...) fmt.Println(output) if err != nil { return err diff --git a/magefiles/go.go b/magefiles/go.go index 9b1d6957c9b..15bdf2c3364 100644 --- a/magefiles/go.go +++ b/magefiles/go.go @@ -8,7 +8,7 @@ import ( "github.com/pkg/errors" ) -const GO_VERSION_CONSTRAINT = ">= 1.20.0" +const GO_VERSION_CONSTRAINT = ">= 1.21.0" func goBinary() string { return binaryWithExt("go") diff --git a/magefiles/kind.go b/magefiles/kind.go index f513d5fc761..362b834c64a 100644 --- a/magefiles/kind.go +++ b/magefiles/kind.go @@ -1,40 +1,32 @@ package main import ( + "fmt" "os" "path/filepath" - "runtime" + "regexp" "strings" - semver "github.com/Masterminds/semver/v3" "github.com/magefile/mage/mg" + + semver "github.com/Masterminds/semver/v3" "github.com/magefile/mage/sh" "github.com/pkg/errors" ) const ( - KIND_VERSION_CONSTRAINT = ">= 0.14.0" + KIND_VERSION_CONSTRAINT = ">= 0.21.0" KIND_CONFIG_INTERNAL = ".kube/internal/config" KIND_CONFIG_EXTERNAL = ".kube/external/config" KIND_NAME = "armada-test" ) -func getImages() []string { - images := []string{ - "alpine:3.18.3", - "nginx:1.21.6", - "registry.k8s.io/ingress-nginx/controller:v1.4.0", - "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343", +func getImagesUsedInTestsOrControllers() []string { + return []string{ + "nginx:1.27.0", // Used by ingress-controller + "alpine:3.20.0", + "bitnami/kubectl:1.30", } - // TODO: find suitable kubectl image for arm64 - if !isAppleSilicon() { - images = append(images, "bitnami/kubectl:1.24.8") - } - return images -} - -func isAppleSilicon() bool { - return runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" } func kindBinary() string { @@ -73,18 +65,6 @@ func kindCheck() error { return constraintCheck(version, KIND_VERSION_CONSTRAINT, "kind") } -// Images that 
need to be available in the Kind cluster, -// e.g., images required for e2e tests. -func kindGetImages() error { - for _, image := range getImages() { - if err := dockerRun("pull", image); err != nil { - return err - } - } - - return nil -} - func kindInitCluster() error { out, err := kindOutput("get", "clusters") if err != nil { @@ -103,23 +83,125 @@ func kindInitCluster() error { return nil } -func kindSetup() error { - mg.Deps(kindInitCluster, kindGetImages) +func imagesFromFile(resourceYamlPath string) ([]string, error) { + content, err := os.ReadFile(resourceYamlPath) + if err != nil { + return nil, fmt.Errorf("error reading file: %w", err) + } + + re := regexp.MustCompile(`(?m)image:\s*([^\s]+)`) + matches := re.FindAllStringSubmatch(string(content), -1) + if matches == nil { + return nil, nil + } + + var images []string + for _, match := range matches { + if len(match) > 1 { + images = append(images, match[1]) + } + } + + return images, nil +} + +func remapDockerRegistryIfRequired(image string, registries map[string]string) string { + for registryFrom, registryTo := range registries { + if strings.HasPrefix(image, registryFrom) { + return registryTo + strings.TrimPrefix(image, registryFrom) + } + } + return image +} + +func remapDockerImagesInKubernetesManifest(filePath string, images []string, buildConfig BuildConfig) (string, error) { + if buildConfig.DockerRegistries == nil { + return filePath, nil + } + + content, err := os.ReadFile(filePath) + if err != nil { + return filePath, fmt.Errorf("error reading manifest: %w", err) + } + + replacedContent := "" + for _, image := range images { + targetImage := remapDockerRegistryIfRequired(image, buildConfig.DockerRegistries) + if targetImage != image { + if replacedContent == "" { + replacedContent = string(content) + } + + replacedContent = strings.ReplaceAll(replacedContent, image, targetImage) + } + } + + if replacedContent == "" { + return filePath, nil + } + + f, err := os.CreateTemp("", "") + if err != nil { + return filePath, fmt.Errorf("error creating temporary file: %w", err) + } + _, err = f.WriteString(replacedContent) + if err != nil { + return filePath, fmt.Errorf("error writing temporary file: %w", err) + } + return f.Name(), nil +} + +func kindSetupExternalImages(buildConfig BuildConfig, images []string) error { + for _, image := range images { + image = remapDockerRegistryIfRequired(image, buildConfig.DockerRegistries) + if err := dockerRun("pull", image); err != nil { + return fmt.Errorf("error pulling image: %w", err) + } - for _, image := range getImages() { err := kindRun("load", "docker-image", image, "--name", KIND_NAME) if err != nil { - return err + return fmt.Errorf("error loading image to kind: %w", err) } } - // Resources to create in the Kind cluster. 
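The kind setup now scans each manifest for image: references and rewrites their registries via the dockerRegistries map before pulling and loading them into the cluster. A self-contained sketch of that behaviour on a toy manifest, reusing the same regex and prefix-remap logic (the mirror registry below is hypothetical):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern used by imagesFromFile to pull image references out of a
// Kubernetes manifest.
var imageRe = regexp.MustCompile(`(?m)image:\s*([^\s]+)`)

// Prefix-based remap, matching remapDockerRegistryIfRequired: if an image
// starts with a configured registry, swap that prefix for the target.
func remap(image string, registries map[string]string) string {
	for from, to := range registries {
		if strings.HasPrefix(image, from) {
			return to + strings.TrimPrefix(image, from)
		}
	}
	return image
}

func main() {
	manifest := `
containers:
  - name: controller
    image: registry.k8s.io/ingress-nginx/controller:v1.4.0
  - name: sidecar
    image: nginx:1.27.0
`
	// Hypothetical mirror configuration, as it might appear in ARMADA_BUILD_CONFIG.
	registries := map[string]string{"registry.k8s.io": "mirror.example.com/registry.k8s.io"}

	for _, m := range imageRe.FindAllStringSubmatch(manifest, -1) {
		fmt.Println(m[1], "->", remap(m[1], registries))
	}
}
```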
+ + return nil +} + +func kindSetup() error { + mg.Deps(kindInitCluster) + + buildConfig, err := getBuildConfig() + if err != nil { + return err + } + + err = kindSetupExternalImages(buildConfig, getImagesUsedInTestsOrControllers()) + if err != nil { + return err + } + resources := []string{ "e2e/setup/ingress-nginx.yaml", "e2e/setup/priorityclasses.yaml", "e2e/setup/namespace-with-anonymous-user.yaml", } for _, f := range resources { - err := kubectlRun("apply", "-f", f, "--context", "kind-armada-test") + images, err := imagesFromFile(f) + if err != nil { + return err + } + + err = kindSetupExternalImages(buildConfig, images) + if err != nil { + return err + } + + file, err := remapDockerImagesInKubernetesManifest(f, images, buildConfig) + if err != nil { + return err + } + + err = kubectlRun("apply", "-f", file, "--context", "kind-armada-test") if err != nil { return err } diff --git a/magefiles/main.go b/magefiles/main.go index 607296dfe05..e10d4e8035f 100644 --- a/magefiles/main.go +++ b/magefiles/main.go @@ -201,9 +201,6 @@ func LocalDev(arg string) error { mg.Deps(BootstrapTools) fmt.Println("Time to bootstrap tools:", time.Since(timeTaken)) - // Set the Executor Update Frequency to 1 second for local development - os.Setenv("ARMADA_SCHEDULING_EXECUTORUPDATEFREQUENCY", "1s") - switch arg { case "minimal": mg.Deps(mg.F(goreleaserMinimalRelease, "bundle"), Kind, downloadDependencyImages) diff --git a/magefiles/python.go b/magefiles/python.go index 4d8cdbc02a1..a7e055df0e7 100644 --- a/magefiles/python.go +++ b/magefiles/python.go @@ -11,7 +11,12 @@ import ( func BuildPython() error { mg.Deps(BootstrapProto) - err := dockerRun("buildx", "build", "-o", "type=docker", "-t", "armada-python-client-builder", "-f", "./build/python-client/Dockerfile", ".") + buildConfig, err := getBuildConfig() + if err != nil { + return err + } + + err = dockerBuildImage(NewDockerBuildConfig(buildConfig.PythonBuilderBaseImage), "armada-python-client-builder", "./build/python-client/Dockerfile") if err != nil { return err } @@ -21,13 +26,10 @@ func BuildPython() error { return err } - return dockerRun( - "run", + return dockerRun("run", "--rm", "-v", fmt.Sprintf("%s/proto:/proto", wd), "-v", fmt.Sprintf("%s:/go/src/armada", wd), "-w", "/go/src/armada", - "armada-python-client-builder", - "./scripts/build-python-client.sh", - ) + "armada-python-client-builder", "./scripts/build-python-client.sh") } diff --git a/magefiles/utils.go b/magefiles/utils.go index a4985a42b8b..ca77ac0eafd 100644 --- a/magefiles/utils.go +++ b/magefiles/utils.go @@ -43,11 +43,6 @@ func set[S ~[]E, E comparable](s S) map[E]bool { return m } -// Check if the user is on an arm system -func onArm() bool { - return runtime.GOARCH == "arm64" -} - // Check if the user is on a windows system func onWindows() bool { return runtime.GOOS == "windows" diff --git a/pkg/api/api.swagger.go b/pkg/api/api.swagger.go index 7822f967bd6..f922cb52759 100644 --- a/pkg/api/api.swagger.go +++ b/pkg/api/api.swagger.go @@ -624,7 +624,8 @@ func SwaggerJsonTemplate() string { " \"Error\",\n" + " \"Evicted\",\n" + " \"OOM\",\n" + - " \"DeadlineExceeded\"\n" + + " \"DeadlineExceeded\",\n" + + " \"Rejected\"\n" + " ]\n" + " },\n" + " \"apiContainerStatus\": {\n" + @@ -661,15 +662,9 @@ func SwaggerJsonTemplate() string { " \"cancelling\": {\n" + " \"$ref\": \"#/definitions/apiJobCancellingEvent\"\n" + " },\n" + - " \"duplicateFound\": {\n" + - " \"$ref\": \"#/definitions/apiJobDuplicateFoundEvent\"\n" + - " },\n" + " \"failed\": {\n" + " \"$ref\": 
\"#/definitions/apiJobFailedEvent\"\n" + " },\n" + - " \"failedCompressed\": {\n" + - " \"$ref\": \"#/definitions/apiJobFailedEventCompressed\"\n" + - " },\n" + " \"ingressInfo\": {\n" + " \"$ref\": \"#/definitions/apiJobIngressInfoEvent\"\n" + " },\n" + @@ -715,9 +710,6 @@ func SwaggerJsonTemplate() string { " \"unableToSchedule\": {\n" + " \"$ref\": \"#/definitions/apiJobUnableToScheduleEvent\"\n" + " },\n" + - " \"updated\": {\n" + - " \"$ref\": \"#/definitions/apiJobUpdatedEvent\"\n" + - " },\n" + " \"utilisation\": {\n" + " \"$ref\": \"#/definitions/apiJobUtilisationEvent\"\n" + " }\n" + @@ -1027,27 +1019,6 @@ func SwaggerJsonTemplate() string { " }\n" + " }\n" + " },\n" + - " \"apiJobDuplicateFoundEvent\": {\n" + - " \"type\": \"object\",\n" + - " \"properties\": {\n" + - " \"created\": {\n" + - " \"type\": \"string\",\n" + - " \"format\": \"date-time\"\n" + - " },\n" + - " \"jobId\": {\n" + - " \"type\": \"string\"\n" + - " },\n" + - " \"jobSetId\": {\n" + - " \"type\": \"string\"\n" + - " },\n" + - " \"originalJobId\": {\n" + - " \"type\": \"string\"\n" + - " },\n" + - " \"queue\": {\n" + - " \"type\": \"string\"\n" + - " }\n" + - " }\n" + - " },\n" + " \"apiJobFailedEvent\": {\n" + " \"type\": \"object\",\n" + " \"properties\": {\n" + @@ -1104,16 +1075,6 @@ func SwaggerJsonTemplate() string { " }\n" + " }\n" + " },\n" + - " \"apiJobFailedEventCompressed\": {\n" + - " \"type\": \"object\",\n" + - " \"title\": \"Only used internally by Armada\",\n" + - " \"properties\": {\n" + - " \"event\": {\n" + - " \"type\": \"string\",\n" + - " \"format\": \"byte\"\n" + - " }\n" + - " }\n" + - " },\n" + " \"apiJobIngressInfoEvent\": {\n" + " \"type\": \"object\",\n" + " \"properties\": {\n" + @@ -1583,12 +1544,6 @@ func SwaggerJsonTemplate() string { " \"errorIfMissing\": {\n" + " \"type\": \"boolean\"\n" + " },\n" + - " \"forceLegacy\": {\n" + - " \"type\": \"boolean\"\n" + - " },\n" + - " \"forceNew\": {\n" + - " \"type\": \"boolean\"\n" + - " },\n" + " \"fromMessageId\": {\n" + " \"type\": \"string\"\n" + " },\n" + @@ -1700,11 +1655,6 @@ func SwaggerJsonTemplate() string { " \"type\": \"number\",\n" + " \"format\": \"double\"\n" + " },\n" + - " \"queueTtlSeconds\": {\n" + - " \"description\": \"Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. 
Zero indicates an infinite lifetime.\",\n" + - " \"type\": \"string\",\n" + - " \"format\": \"int64\"\n" + - " },\n" + " \"requiredNodeLabels\": {\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + @@ -1881,30 +1831,6 @@ func SwaggerJsonTemplate() string { " }\n" + " }\n" + " },\n" + - " \"apiJobUpdatedEvent\": {\n" + - " \"type\": \"object\",\n" + - " \"properties\": {\n" + - " \"created\": {\n" + - " \"type\": \"string\",\n" + - " \"format\": \"date-time\"\n" + - " },\n" + - " \"job\": {\n" + - " \"$ref\": \"#/definitions/apiJob\"\n" + - " },\n" + - " \"jobId\": {\n" + - " \"type\": \"string\"\n" + - " },\n" + - " \"jobSetId\": {\n" + - " \"type\": \"string\"\n" + - " },\n" + - " \"queue\": {\n" + - " \"type\": \"string\"\n" + - " },\n" + - " \"requestor\": {\n" + - " \"type\": \"string\"\n" + - " }\n" + - " }\n" + - " },\n" + " \"apiJobUtilisationEvent\": {\n" + " \"type\": \"object\",\n" + " \"properties\": {\n" + @@ -2011,6 +1937,7 @@ func SwaggerJsonTemplate() string { " },\n" + " \"resourceLimits\": {\n" + " \"type\": \"object\",\n" + + " \"title\": \"These are ignored and should be removed\",\n" + " \"additionalProperties\": {\n" + " \"type\": \"number\",\n" + " \"format\": \"double\"\n" + @@ -2136,7 +2063,7 @@ func SwaggerJsonTemplate() string { " }\n" + " },\n" + " \"resourceQuantity\": {\n" + - " \"description\": \"The serialization format is:\\n\\n\\u003cquantity\\u003e ::= \\u003csignedNumber\\u003e\\u003csuffix\\u003e\\n(Note that \\u003csuffix\\u003e may be empty, from the \\\"\\\" case in \\u003cdecimalSI\\u003e.)\\n\\u003cdigit\\u003e ::= 0 | 1 | ... | 9\\n\\u003cdigits\\u003e ::= \\u003cdigit\\u003e | \\u003cdigit\\u003e\\u003cdigits\\u003e\\n\\u003cnumber\\u003e ::= \\u003cdigits\\u003e | \\u003cdigits\\u003e.\\u003cdigits\\u003e | \\u003cdigits\\u003e. | .\\u003cdigits\\u003e\\n\\u003csign\\u003e ::= \\\"+\\\" | \\\"-\\\"\\n\\u003csignedNumber\\u003e ::= \\u003cnumber\\u003e | \\u003csign\\u003e\\u003cnumber\\u003e\\n\\u003csuffix\\u003e ::= \\u003cbinarySI\\u003e | \\u003cdecimalExponent\\u003e | \\u003cdecimalSI\\u003e\\n\\u003cbinarySI\\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\u003cdecimalSI\\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\u003cdecimalExponent\\u003e ::= \\\"e\\\" \\u003csignedNumber\\u003e | \\\"E\\\" \\u003csignedNumber\\u003e\\n\\nNo matter which of the three exponent forms is used, no quantity may represent\\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\\nplaces. Numbers larger or more precise will be capped or rounded up.\\n(E.g.: 0.1m will rounded up to 1m.)\\nThis may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix\\nit had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\".\\nThis means that Exponent/suffix will be adjusted up or down (with a\\ncorresponding increase or decrease in Mantissa) such that:\\na. No precision is lost\\nb. No fractional digits will be emitted\\nc. 
The exponent (or suffix) is as large as possible.\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n1.5 will be serialized as \\\"1500m\\\"\\n1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a\\nfloating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed,\\nbut will be re-emitted in their canonical form. (So always use canonical\\nform, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without\\nwriting some sort of special handling code in the hopes that that will\\ncause implementors to also use a fixed point implementation.\\n\\n+protobuf=true\\n+protobuf.embed=string\\n+protobuf.options.marshal=false\\n+protobuf.options.(gogoproto.goproto_stringer)=false\\n+k8s:deepcopy-gen=true\\n+k8s:openapi-gen=true\",\n" + + " \"description\": \"The serialization format is:\\n\\n```\\n\\u003cquantity\\u003e ::= \\u003csignedNumber\\u003e\\u003csuffix\\u003e\\n\\n(Note that \\u003csuffix\\u003e may be empty, from the \\\"\\\" case in \\u003cdecimalSI\\u003e.)\\n\\n\\u003cdigit\\u003e ::= 0 | 1 | ... | 9\\n\\u003cdigits\\u003e ::= \\u003cdigit\\u003e | \\u003cdigit\\u003e\\u003cdigits\\u003e\\n\\u003cnumber\\u003e ::= \\u003cdigits\\u003e | \\u003cdigits\\u003e.\\u003cdigits\\u003e | \\u003cdigits\\u003e. | .\\u003cdigits\\u003e\\n\\u003csign\\u003e ::= \\\"+\\\" | \\\"-\\\"\\n\\u003csignedNumber\\u003e ::= \\u003cnumber\\u003e | \\u003csign\\u003e\\u003cnumber\\u003e\\n\\u003csuffix\\u003e ::= \\u003cbinarySI\\u003e | \\u003cdecimalExponent\\u003e | \\u003cdecimalSI\\u003e\\n\\u003cbinarySI\\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\\n\\n(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\\n\\n\\u003cdecimalSI\\u003e ::= m | \\\"\\\" | k | M | G | T | P | E\\n\\n(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\\n\\n\\u003cdecimalExponent\\u003e ::= \\\"e\\\" \\u003csignedNumber\\u003e | \\\"E\\\" \\u003csignedNumber\\u003e\\n```\\n\\nNo matter which of the three exponent forms is used, no quantity may represent\\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\\nplaces. Numbers larger or more precise will be capped or rounded up.\\n(E.g.: 0.1m will rounded up to 1m.)\\nThis may be extended in the future if we require larger or smaller quantities.\\n\\nWhen a Quantity is parsed from a string, it will remember the type of suffix\\nit had, and will use the same type again when it is serialized.\\n\\nBefore serializing, Quantity will be put in \\\"canonical form\\\".\\nThis means that Exponent/suffix will be adjusted up or down (with a\\ncorresponding increase or decrease in Mantissa) such that:\\n\\nNo precision is lost\\nNo fractional digits will be emitted\\nThe exponent (or suffix) is as large as possible.\\n\\nThe sign will be omitted unless the number is negative.\\n\\nExamples:\\n\\n1.5 will be serialized as \\\"1500m\\\"\\n1.5Gi will be serialized as \\\"1536Mi\\\"\\n\\nNote that the quantity will NEVER be internally represented by a\\nfloating point number. That is the whole point of this exercise.\\n\\nNon-canonical values will still parse as long as they are well formed,\\nbut will be re-emitted in their canonical form. 
(So always use canonical\\nform, or don't diff.)\\n\\nThis format is intended to make it difficult to use these numbers without\\nwriting some sort of special handling code in the hopes that that will\\ncause implementors to also use a fixed point implementation.\\n\\n+protobuf=true\\n+protobuf.embed=string\\n+protobuf.options.marshal=false\\n+protobuf.options.(gogoproto.goproto_stringer)=false\\n+k8s:deepcopy-gen=true\\n+k8s:openapi-gen=true\",\n" + " \"type\": \"string\",\n" + " \"title\": \"Quantity is a fixed-point representation of a number.\\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\\nin addition to String() and AsInt64() accessors.\",\n" + " \"x-go-package\": \"k8s.io/apimachinery/pkg/api/resource\"\n" + @@ -2198,23 +2125,23 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Persistent Disk resource in AWS.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"partition\": {\n" + - " \"description\": \"The partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\\"1\\\".\\nSimilarly, the volume partition for /dev/sda is \\\"0\\\" (or you can leave the property empty).\\n+optional\",\n" + + " \"description\": \"partition is the partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\\"1\\\".\\nSimilarly, the volume partition for /dev/sda is \\\"0\\\" (or you can leave the property empty).\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"Partition\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Specify \\\"true\\\" to force and set the ReadOnly property in VolumeMounts to \\\"true\\\".\\nIf omitted, the default is \\\"false\\\".\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\\n+optional\",\n" + + " \"description\": \"readOnly value true will force the readOnly setting in VolumeMounts.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"volumeID\": {\n" + - " \"description\": \"Unique ID of the persistent disk resource in AWS (Amazon EBS volume).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\",\n" + + " \"description\": \"volumeID is unique ID of the persistent disk resource in 
AWS (Amazon EBS volume).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeID\"\n" + " }\n" + @@ -2238,10 +2165,12 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1AzureDataDiskCachingMode\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1AzureDataDiskKind\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -2253,17 +2182,17 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1AzureDataDiskCachingMode\"\n" + " },\n" + " \"diskName\": {\n" + - " \"description\": \"The Name of the data disk in the blob storage\",\n" + + " \"description\": \"diskName is the Name of the data disk in the blob storage\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"DiskName\"\n" + " },\n" + " \"diskURI\": {\n" + - " \"description\": \"The URI the data disk in the blob storage\",\n" + + " \"description\": \"diskURI is the URI of data disk in the blob storage\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"DataDiskURI\"\n" + " },\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\n+optional\",\n" + + " \"description\": \"fsType is Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + @@ -2271,7 +2200,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1AzureDataDiskKind\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " }\n" + @@ -2283,17 +2212,17 @@ func SwaggerJsonTemplate() string { " \"title\": \"AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\",\n" + " \"properties\": {\n" + " \"readOnly\": {\n" + - " \"description\": \"Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"secretName\": {\n" + - " \"description\": \"the name of secret that contains Azure Storage Account Name and Key\",\n" + + " \"description\": \"secretName is the name of secret that contains Azure Storage Account Name and Key\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"SecretName\"\n" + " },\n" + " \"shareName\": {\n" + - " \"description\": \"Share Name\",\n" + + " \"description\": \"shareName is the azure share Name\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"ShareName\"\n" + " }\n" + @@ -2305,12 +2234,12 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"driver\": {\n" + - " \"description\": \"Driver is the name of the CSI driver that handles this volume.\\nConsult with your admin for the correct name as registered in the cluster.\",\n" + + " \"description\": \"driver is the name of the CSI driver that handles this volume.\\nConsult with your admin for the correct name as registered in the cluster.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Driver\"\n" + " },\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount. Ex. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\".\\nIf not provided, the empty value is passed to the associated CSI driver\\nwhich will determine the default filesystem to apply.\\n+optional\",\n" + + " \"description\": \"fsType to mount. Ex. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\".\\nIf not provided, the empty value is passed to the associated CSI driver\\nwhich will determine the default filesystem to apply.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + @@ -2318,12 +2247,12 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Specifies a read-only configuration for the volume.\\nDefaults to false (read/write).\\n+optional\",\n" + + " \"description\": \"readOnly specifies a read-only configuration for the volume.\\nDefaults to false (read/write).\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"volumeAttributes\": {\n" + - " \"description\": \"VolumeAttributes stores driver-specific properties that are passed to the CSI\\ndriver. Consult your driver's documentation for supported values.\\n+optional\",\n" + + " \"description\": \"volumeAttributes stores driver-specific properties that are passed to the CSI\\ndriver. 
Consult your driver's documentation for supported values.\\n+optional\",\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + " \"type\": \"string\"\n" + @@ -2366,7 +2295,7 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"monitors\": {\n" + - " \"description\": \"Required: Monitors is a collection of Ceph monitors\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\",\n" + + " \"description\": \"monitors is Required: Monitors is a collection of Ceph monitors\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -2374,17 +2303,17 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Monitors\"\n" + " },\n" + " \"path\": {\n" + - " \"description\": \"Optional: Used as the mounted root, rather than the full Ceph tree, default is /\\n+optional\",\n" + + " \"description\": \"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Path\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Optional: Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"secretFile\": {\n" + - " \"description\": \"Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"SecretFile\"\n" + " },\n" + @@ -2392,7 +2321,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"user\": {\n" + - " \"description\": \"Optional: User is the rados user name, default is admin\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"user is optional: User is the rados user name, default is admin\\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"User\"\n" + " }\n" + @@ -2405,12 +2334,12 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a cinder volume resource in Openstack.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Optional: Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\\n+optional\",\n" + + " \"description\": \"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + @@ -2418,13 +2347,31 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"volumeID\": {\n" + - " \"description\": \"volume id used to identify the volume in cinder.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\",\n" + + " \"description\": \"volumeID used to identify the volume in cinder.\\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeID\"\n" + " }\n" + " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1ClaimSource\": {\n" + + " \"description\": \"Exactly one of these fields should be set. Consumers of this type must\\ntreat an empty object as if it has an unknown value.\",\n" + + " \"type\": \"object\",\n" + + " \"title\": \"ClaimSource describes a reference to a ResourceClaim.\",\n" + + " \"properties\": {\n" + + " \"resourceClaimName\": {\n" + + " \"description\": \"ResourceClaimName is the name of a ResourceClaim object in the same\\nnamespace as this pod.\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"ResourceClaimName\"\n" + + " },\n" + + " \"resourceClaimTemplateName\": {\n" + + " \"description\": \"ResourceClaimTemplateName is the name of a ResourceClaimTemplate\\nobject in the same namespace as this pod.\\n\\nThe template will be used to create a new ResourceClaim, which will\\nbe bound to this pod. When this pod is deleted, the ResourceClaim\\nwill also be deleted. The name of the ResourceClaim will be \\u003cpod\\nname\\u003e-\\u003cresource name\\u003e, where \\u003cresource name\\u003e is the\\nPodResourceClaim.Name. Pod validation will reject the pod if the\\nconcatenated name is not valid for a ResourceClaim (e.g. too long).\\n\\nAn existing ResourceClaim with that name that is not owned by the\\npod will not be used for the pod to avoid using an unrelated\\nresource by mistake. 
Scheduling and pod startup are then blocked\\nuntil the unrelated ResourceClaim is removed.\\n\\nThis field is immutable and no changes will be made to the\\ncorresponding ResourceClaim by the control plane after creating the\\nResourceClaim.\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"ResourceClaimTemplateName\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1ClientIPConfig\": {\n" + " \"description\": \"ClientIPConfig represents the configurations of Client IP based session affinity.\",\n" + " \"type\": \"object\",\n" + @@ -2437,9 +2384,9 @@ func SwaggerJsonTemplate() string { " }\n" + " },\n" + " \"v1Condition\": {\n" + - " \"description\": \"// other fields\\n}\",\n" + + " \"description\": \"type FooStatus struct{\\n\\t // Represents the observations of a foo's current state.\\n\\t // Known .status.conditions.type are: \\\"Available\\\", \\\"Progressing\\\", and \\\"Degraded\\\"\\n\\t // +patchMergeKey=type\\n\\t // +patchStrategy=merge\\n\\t // +listType=map\\n\\t // +listMapKey=type\\n\\t Conditions []metav1.Condition `json:\\\"conditions,omitempty\\\" patchStrategy:\\\"merge\\\" patchMergeKey:\\\"type\\\" protobuf:\\\"bytes,1,rep,name=conditions\\\"`\\n\\n\\t // other fields\\n\\t}\",\n" + " \"type\": \"object\",\n" + - " \"title\": \"Condition contains details for one aspect of the current state of this API Resource.\\n---\\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\\ntype FooStatus struct{\\n // Represents the observations of a foo's current state.\\n // Known .status.conditions.type are: \\\"Available\\\", \\\"Progressing\\\", and \\\"Degraded\\\"\\n // +patchMergeKey=type\\n // +patchStrategy=merge\\n // +listType=map\\n // +listMapKey=type\\n Conditions []metav1.Condition `json:\\\"conditions,omitempty\\\" patchStrategy:\\\"merge\\\" patchMergeKey:\\\"type\\\" protobuf:\\\"bytes,1,rep,name=conditions\\\"`\",\n" + + " \"title\": \"Condition contains details for one aspect of the current state of this API Resource.\\n---\\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\",\n" + " \"properties\": {\n" + " \"lastTransitionTime\": {\n" + " \"title\": \"lastTransitionTime is the last time the condition transitioned from one status to another.\\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.\\n+required\\n+kubebuilder:validation:Required\\n+kubebuilder:validation:Type=string\\n+kubebuilder:validation:Format=date-time\",\n" + @@ -2515,7 +2462,7 @@ func SwaggerJsonTemplate() string { " \"title\": \"Adapts a ConfigMap into a projected volume.\",\n" + " \"properties\": {\n" + " \"items\": {\n" + - " \"description\": \"If unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\\n+optional\",\n" + + " \"description\": \"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1KeyToPath\"\n" + @@ -2528,7 +2475,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Name\"\n" + " },\n" + " \"optional\": {\n" + - " \"description\": \"Specify whether the ConfigMap or its keys must be defined\\n+optional\",\n" + + " \"description\": \"optional specify whether the ConfigMap or its keys must be defined\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"Optional\"\n" + " }\n" + @@ -2541,13 +2488,13 @@ func SwaggerJsonTemplate() string { " \"title\": \"Adapts a ConfigMap into a volume.\",\n" + " \"properties\": {\n" + " \"defaultMode\": {\n" + - " \"description\": \"Optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDefaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + + " \"description\": \"defaultMode is optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDefaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"DefaultMode\"\n" + " },\n" + " \"items\": {\n" + - " \"description\": \"If unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\\n+optional\",\n" + + " \"description\": \"items if unspecified, each key-value pair in the Data field of the referenced\\nConfigMap will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the ConfigMap,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1KeyToPath\"\n" + @@ -2560,7 +2507,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Name\"\n" + " },\n" + " \"optional\": {\n" + - " \"description\": \"Specify whether the ConfigMap or its keys must be defined\\n+optional\",\n" + + " \"description\": \"optional specify whether the ConfigMap or its keys must be defined\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"Optional\"\n" + " }\n" + @@ -2572,7 +2519,7 @@ func SwaggerJsonTemplate() string { " \"title\": \"A single application container that you want to run within a pod.\",\n" + " \"properties\": {\n" + " \"args\": {\n" + - " \"description\": \"Arguments to the entrypoint.\\nThe docker image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + + " \"description\": \"Arguments to the entrypoint.\\nThe container image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -2580,7 +2527,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Args\"\n" + " },\n" + " \"command\": {\n" + - " \"description\": \"Entrypoint array. Not executed within a shell.\\nThe docker image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + + " \"description\": \"Entrypoint array. Not executed within a shell.\\nThe container image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -2604,7 +2551,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"EnvFrom\"\n" + " },\n" + " \"image\": {\n" + - " \"description\": \"Docker image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\\nThis field is optional to allow higher level config management to default or override\\ncontainer images in workload controllers like Deployments and StatefulSets.\\n+optional\",\n" + + " \"description\": \"Container image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\\nThis field is optional to allow higher level config management to default or override\\ncontainer images in workload controllers like Deployments and StatefulSets.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Image\"\n" + " },\n" + @@ -2623,7 +2570,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Name\"\n" + " },\n" + " \"ports\": {\n" + - " \"description\": \"List of ports to expose from the container. Exposing a port here gives\\nthe system additional information about the network connections a\\ncontainer uses, but is primarily informational. Not specifying a port here\\nDOES NOT prevent that port from being exposed. Any port which is\\nlistening on the default \\\"0.0.0.0\\\" address inside a container will be\\naccessible from the network.\\nCannot be updated.\\n+optional\\n+patchMergeKey=containerPort\\n+patchStrategy=merge\\n+listType=map\\n+listMapKey=containerPort\\n+listMapKey=protocol\",\n" + + " \"description\": \"List of ports to expose from the container. Not specifying a port here\\nDOES NOT prevent that port from being exposed. Any port which is\\nlistening on the default \\\"0.0.0.0\\\" address inside a container will be\\naccessible from the network.\\nModifying this array with strategic merge patch may corrupt the data.\\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\\nCannot be updated.\\n+optional\\n+patchMergeKey=containerPort\\n+patchStrategy=merge\\n+listType=map\\n+listMapKey=containerPort\\n+listMapKey=protocol\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1ContainerPort\"\n" + @@ -2722,6 +2669,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1DNSPolicy\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"DNSPolicy defines how a pod's DNS will be configured.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -2860,11 +2808,12 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1EphemeralContainer\": {\n" + - " \"description\": \"An EphemeralContainer is a container that may be added temporarily to an existing pod for\\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\\nscheduling guarantees, and they will not be restarted when they exit or when a pod is\\nremoved or restarted. 
If an ephemeral container causes a pod to exceed its resource\\nallocation, the pod may be evicted.\\nEphemeral containers may not be added by directly updating the pod spec. They must be added\\nvia the pod's ephemeralcontainers subresource, and they will appear in the pod spec\\nonce added.\\nThis is an alpha feature enabled by the EphemeralContainers feature flag.\",\n" + + " \"description\": \"To add an ephemeral container, use the ephemeralcontainers subresource of an existing\\nPod. Ephemeral containers may not be removed or restarted.\",\n" + " \"type\": \"object\",\n" + + " \"title\": \"An EphemeralContainer is a temporary container that you may add to an existing Pod for\\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\\nscheduling guarantees, and they will not be restarted when they exit or when a Pod is\\nremoved or restarted. The kubelet may evict a Pod if an ephemeral container causes the\\nPod to exceed its resource allocation.\",\n" + " \"properties\": {\n" + " \"args\": {\n" + - " \"description\": \"Arguments to the entrypoint.\\nThe docker image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + + " \"description\": \"Arguments to the entrypoint.\\nThe image's CMD is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -2872,7 +2821,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Args\"\n" + " },\n" + " \"command\": {\n" + - " \"description\": \"Entrypoint array. Not executed within a shell.\\nThe docker image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + + " \"description\": \"Entrypoint array. 
Not executed within a shell.\\nThe image's ENTRYPOINT is used if this is not provided.\\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \\\"$$(VAR_NAME)\\\" will\\nproduce the string literal \\\"$(VAR_NAME)\\\". Escaped references will never be expanded, regardless\\nof whether the variable exists or not. Cannot be updated.\\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -2896,7 +2845,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"EnvFrom\"\n" + " },\n" + " \"image\": {\n" + - " \"description\": \"Docker image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\",\n" + + " \"description\": \"Container image name.\\nMore info: https://kubernetes.io/docs/concepts/containers/images\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Image\"\n" + " },\n" + @@ -2915,7 +2864,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Name\"\n" + " },\n" + " \"ports\": {\n" + - " \"description\": \"Ports are not allowed for ephemeral containers.\",\n" + + " \"description\": \"Ports are not allowed for ephemeral containers.\\n+optional\\n+patchMergeKey=containerPort\\n+patchStrategy=merge\\n+listType=map\\n+listMapKey=containerPort\\n+listMapKey=protocol\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1ContainerPort\"\n" + @@ -2945,7 +2894,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"StdinOnce\"\n" + " },\n" + " \"targetContainerName\": {\n" + - " \"description\": \"If set, the name of the container from PodSpec that this ephemeral container targets.\\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\\nIf not set then the ephemeral container is run in whatever namespaces are shared\\nfor the pod. Note that the container runtime must support this feature.\\n+optional\",\n" + + " \"description\": \"If set, the name of the container from PodSpec that this ephemeral container targets.\\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\\n\\nThe container runtime must implement support for this feature. If the runtime does not\\nsupport namespace targeting then the result of setting this field is undefined.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"TargetContainerName\"\n" + " },\n" + @@ -2971,7 +2920,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"VolumeDevices\"\n" + " },\n" + " \"volumeMounts\": {\n" + - " \"description\": \"Pod volumes to mount into the container's filesystem.\\nCannot be updated.\\n+optional\\n+patchMergeKey=mountPath\\n+patchStrategy=merge\",\n" + + " \"description\": \"Pod volumes to mount into the container's filesystem. 
Subpath mounts are not allowed for ephemeral containers.\\nCannot be updated.\\n+optional\\n+patchMergeKey=mountPath\\n+patchStrategy=merge\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1VolumeMount\"\n" + @@ -3017,23 +2966,23 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Fibre Channel volume.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"lun\": {\n" + - " \"description\": \"Optional: FC target lun number\\n+optional\",\n" + + " \"description\": \"lun is Optional: FC target lun number\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"Lun\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Optional: Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"targetWWNs\": {\n" + - " \"description\": \"Optional: FC target worldwide names (WWNs)\\n+optional\",\n" + + " \"description\": \"targetWWNs is Optional: FC target worldwide names (WWNs)\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -3041,7 +2990,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"TargetWWNs\"\n" + " },\n" + " \"wwids\": {\n" + - " \"description\": \"Optional: FC volume world wide identifiers (wwids)\\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\\n+optional\",\n" + + " \"description\": \"wwids Optional: FC volume world wide identifiers (wwids)\\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -3062,17 +3011,17 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"driver\": {\n" + - " \"description\": \"Driver is the name of the driver to use for this volume.\",\n" + + " \"description\": \"driver is the name of the driver to use for this volume.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Driver\"\n" + " },\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". The default filesystem depends on FlexVolume script.\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
The default filesystem depends on FlexVolume script.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"options\": {\n" + - " \"description\": \"Optional: Extra command options if any.\\n+optional\",\n" + + " \"description\": \"options is Optional: this field holds extra command options if any.\\n+optional\",\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + " \"type\": \"string\"\n" + @@ -3080,7 +3029,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Options\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Optional: Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly is Optional: defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + @@ -3096,12 +3045,12 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Flocker volume mounted by the Flocker agent.\",\n" + " \"properties\": {\n" + " \"datasetName\": {\n" + - " \"description\": \"Name of the dataset stored as metadata -\\u003e name on the dataset for Flocker\\nshould be considered as deprecated\\n+optional\",\n" + + " \"description\": \"datasetName is Name of the dataset stored as metadata -\\u003e name on the dataset for Flocker\\nshould be considered as deprecated\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"DatasetName\"\n" + " },\n" + " \"datasetUUID\": {\n" + - " \"description\": \"UUID of the dataset. This is unique identifier of a Flocker dataset\\n+optional\",\n" + + " \"description\": \"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"DatasetUUID\"\n" + " }\n" + @@ -3114,46 +3063,63 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Persistent Disk resource in Google Compute Engine.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + + " \"description\": \"fsType is filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"partition\": {\n" + - " \"description\": \"The partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\\"1\\\".\\nSimilarly, the volume partition for /dev/sda is \\\"0\\\" (or you can leave the property empty).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\n+optional\",\n" + + " \"description\": \"partition is the partition in the volume that you want to mount.\\nIf omitted, the default is to mount by volume name.\\nExamples: For volume /dev/sda1, you specify the partition as \\\"1\\\".\\nSimilarly, the volume partition for /dev/sda is \\\"0\\\" (or you can leave the property empty).\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"Partition\"\n" + " },\n" + " \"pdName\": {\n" + - " \"description\": \"Unique name of the PD resource in GCE. Used to identify the disk in GCE.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\",\n" + + " \"description\": \"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"PDName\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"ReadOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\n+optional\",\n" + + " \"description\": \"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " }\n" + " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1GRPCAction\": {\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"port\": {\n" + + " \"description\": \"Port number of the gRPC service. Number must be in the range 1 to 65535.\",\n" + + " \"type\": \"integer\",\n" + + " \"format\": \"int32\",\n" + + " \"x-go-name\": \"Port\"\n" + + " },\n" + + " \"service\": {\n" + + " \"description\": \"Service is the name of the service to place in the gRPC HealthCheckRequest\\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\\n\\nIf this is not specified, the default behavior is defined by gRPC.\\n+optional\\n+default=\\\"\\\"\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"Service\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1GitRepoVolumeSource\": {\n" + " \"description\": \"DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an\\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\\ninto the Pod's container.\",\n" + " \"type\": \"object\",\n" + " \"title\": \"Represents a volume that is populated with the contents of a git repository.\\nGit repo volumes do not support ownership management.\\nGit repo volumes support SELinux relabeling.\",\n" + " \"properties\": {\n" + " \"directory\": {\n" + - " \"description\": \"Target directory name.\\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\\ngit repository. Otherwise, if specified, the volume will contain the git repository in\\nthe subdirectory with the given name.\\n+optional\",\n" + + " \"description\": \"directory is the target directory name.\\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\\ngit repository. Otherwise, if specified, the volume will contain the git repository in\\nthe subdirectory with the given name.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Directory\"\n" + " },\n" + " \"repository\": {\n" + - " \"description\": \"Repository URL\",\n" + + " \"description\": \"repository is the URL\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Repository\"\n" + " },\n" + " \"revision\": {\n" + - " \"description\": \"Commit hash for the specified revision.\\n+optional\",\n" + + " \"description\": \"revision is the commit hash for the specified revision.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Revision\"\n" + " }\n" + @@ -3166,17 +3132,17 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Glusterfs mount that lasts the lifetime of a pod.\",\n" + " \"properties\": {\n" + " \"endpoints\": {\n" + - " \"description\": \"EndpointsName is the endpoint name that details Glusterfs topology.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\",\n" + + " \"description\": \"endpoints is the endpoint name that details Glusterfs topology.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"EndpointsName\"\n" + " },\n" + " \"path\": {\n" + - " \"description\": \"Path is the Glusterfs volume path.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\",\n" + + " \"description\": \"path is the Glusterfs volume path.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Path\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\\n+optional\",\n" + + " \"description\": \"readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " }\n" + @@ -3219,7 +3185,7 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"name\": {\n" + - " \"description\": \"The header field name\",\n" + + " \"description\": \"The header field name.\\nThis will be canonicalized upon output, so case-variant names will be understood as the same header.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": 
\"Name\"\n" + " },\n" + @@ -3262,22 +3228,6 @@ func SwaggerJsonTemplate() string { " }\n" + " }\n" + " },\n" + - " \"v1Handler\": {\n" + - " \"description\": \"Handler defines a specific action that should be taken\\nTODO: pass structured data to these actions, and document that data here.\",\n" + - " \"type\": \"object\",\n" + - " \"properties\": {\n" + - " \"exec\": {\n" + - " \"$ref\": \"#/definitions/v1ExecAction\"\n" + - " },\n" + - " \"httpGet\": {\n" + - " \"$ref\": \"#/definitions/v1HTTPGetAction\"\n" + - " },\n" + - " \"tcpSocket\": {\n" + - " \"$ref\": \"#/definitions/v1TCPSocketAction\"\n" + - " }\n" + - " },\n" + - " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + - " },\n" + " \"v1HostAlias\": {\n" + " \"description\": \"HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\\npod's hosts file.\",\n" + " \"type\": \"object\",\n" + @@ -3299,6 +3249,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1HostPathType\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -3308,7 +3259,7 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a host path mapped into a pod.\",\n" + " \"properties\": {\n" + " \"path\": {\n" + - " \"description\": \"Path of the directory on the host.\\nIf the path is a symlink, it will follow the link to the real path.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\",\n" + + " \"description\": \"path of the directory on the host.\\nIf the path is a symlink, it will follow the link to the real path.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Path\"\n" + " },\n" + @@ -3324,43 +3275,43 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents an ISCSI disk.\",\n" + " \"properties\": {\n" + " \"chapAuthDiscovery\": {\n" + - " \"description\": \"whether support iSCSI Discovery CHAP authentication\\n+optional\",\n" + + " \"description\": \"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"DiscoveryCHAPAuth\"\n" + " },\n" + " \"chapAuthSession\": {\n" + - " \"description\": \"whether support iSCSI Session CHAP authentication\\n+optional\",\n" + + " \"description\": \"chapAuthSession defines whether support iSCSI Session CHAP authentication\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"SessionCHAPAuth\"\n" + " },\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"initiatorName\": {\n" + - " \"description\": \"Custom iSCSI Initiator Name.\\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\\n\\u003ctarget portal\\u003e:\\u003cvolume name\\u003e will be created for the connection.\\n+optional\",\n" + + " \"description\": \"initiatorName is the custom iSCSI Initiator Name.\\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\\n\\u003ctarget portal\\u003e:\\u003cvolume name\\u003e will be created for the connection.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"InitiatorName\"\n" + " },\n" + " \"iqn\": {\n" + - " \"description\": \"Target iSCSI Qualified Name.\",\n" + + " \"description\": \"iqn is the target iSCSI Qualified Name.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"IQN\"\n" + " },\n" + " \"iscsiInterface\": {\n" + - " \"description\": \"iSCSI Interface Name that uses an iSCSI transport.\\nDefaults to 'default' (tcp).\\n+optional\",\n" + + " \"description\": \"iscsiInterface is the interface Name that uses an iSCSI transport.\\nDefaults to 'default' (tcp).\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"ISCSIInterface\"\n" + " },\n" + " \"lun\": {\n" + - " \"description\": \"iSCSI Target Lun number.\",\n" + + " \"description\": \"lun represents iSCSI Target Lun number.\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"Lun\"\n" + " },\n" + " \"portals\": {\n" + - " \"description\": \"iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260).\\n+optional\",\n" + + " \"description\": \"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260).\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -3368,7 +3319,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Portals\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"ReadOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\n+optional\",\n" + + " \"description\": \"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + @@ -3376,7 +3327,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"targetPortal\": {\n" + - " \"description\": \"iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260).\",\n" + + " \"description\": \"targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port\\nis other than default (typically TCP ports 860 and 3260).\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"TargetPortal\"\n" + " }\n" + @@ -3415,6 +3366,59 @@ func SwaggerJsonTemplate() string { " }\n" + " }\n" + " },\n" + + " \"v1IngressLoadBalancerIngress\": {\n" + + " \"description\": \"IngressLoadBalancerIngress represents the status of a load-balancer ingress point.\",\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"hostname\": {\n" + + " \"type\": \"string\",\n" + + " \"title\": \"Hostname is set for load-balancer ingress points that are DNS based.\\n+optional\"\n" + + " },\n" + + " \"ip\": {\n" + + " \"type\": \"string\",\n" + + " \"title\": \"IP is set for load-balancer ingress points that are IP based.\\n+optional\"\n" + + " },\n" + + " \"ports\": {\n" + + " \"type\": \"array\",\n" + + " \"title\": \"Ports provides information about the ports exposed by this LoadBalancer.\\n+listType=atomic\\n+optional\",\n" + + " \"items\": {\n" + + " \"$ref\": \"#/definitions/v1IngressPortStatus\"\n" + + " }\n" + + " }\n" + + " }\n" + + " },\n" + + " \"v1IngressLoadBalancerStatus\": {\n" + + " \"description\": \"IngressLoadBalancerStatus represents the status of a load-balancer.\",\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"ingress\": {\n" + + " \"type\": \"array\",\n" + + " \"title\": \"Ingress is a list containing ingress points for the load-balancer.\\n+optional\",\n" + + " \"items\": {\n" + + " \"$ref\": \"#/definitions/v1IngressLoadBalancerIngress\"\n" + + " }\n" + + " }\n" + + " }\n" + + " },\n" + + " \"v1IngressPortStatus\": {\n" + + " \"type\": \"object\",\n" + + " \"title\": \"IngressPortStatus represents the error condition of a service port\",\n" + + " \"properties\": {\n" + + " \"error\": {\n" + + " \"type\": \"string\",\n" + + " \"title\": \"Error is to record the problem with the service port\\nThe format of the error shall comply with the following rules:\\n- built-in error values shall be specified in this file and those shall use\\n CamelCase names\\n- cloud provider specific error values must have names that comply with the\\n format foo.example.com/CamelCase.\\n---\\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\\n+optional\\n+kubebuilder:validation:Required\\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\\n+kubebuilder:validation:MaxLength=316\"\n" + + " },\n" + + " \"port\": {\n" + + " \"description\": \"Port is the port number of the ingress port.\",\n" + + " \"type\": \"integer\",\n" + + " \"format\": \"int32\"\n" + + " },\n" + + " \"protocol\": {\n" + + " \"type\": \"string\",\n" + + " \"title\": \"Protocol is the protocol of the ingress port.\\nThe supported values are: \\\"TCP\\\", \\\"UDP\\\", \\\"SCTP\\\"\"\n" + + " }\n" + + " }\n" + + " },\n" + " \"v1IngressRule\": {\n" + " \"description\": \"IngressRule represents the rules mapping the paths under a specified host to\\nthe related backend services. Incoming requests are first evaluated for a host\\nmatch, then routed to the backend associated with the matching IngressRuleValue.\",\n" + " \"type\": \"object\",\n" + @@ -3463,7 +3467,7 @@ func SwaggerJsonTemplate() string { " },\n" + " \"ingressClassName\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"IngressClassName is the name of the IngressClass cluster resource. 
The\\nassociated IngressClass defines which controller will implement the\\nresource. This replaces the deprecated `kubernetes.io/ingress.class`\\nannotation. For backwards compatibility, when that annotation is set, it\\nmust be given precedence over this field. The controller may emit a\\nwarning if the field and annotation have different values.\\nImplementations of this API should ignore Ingresses without a class\\nspecified. An IngressClass resource may be marked as default, which can\\nbe used to set a default value for this field. For more information,\\nrefer to the IngressClass documentation.\\n+optional\"\n" + + " \"title\": \"IngressClassName is the name of an IngressClass cluster resource. Ingress\\ncontroller implementations use this field to know whether they should be\\nserving this Ingress resource, by a transitive connection\\n(controller -\\u003e IngressClass -\\u003e Ingress resource). Although the\\n`kubernetes.io/ingress.class` annotation (simple constant name) was never\\nformally defined, it was widely supported by Ingress controllers to create\\na direct binding between Ingress controller and Ingress resources. Newly\\ncreated Ingress resources should prefer using the field. However, even\\nthough the annotation is officially deprecated, for backwards compatibility\\nreasons, ingress controllers should still honor that annotation if present.\\n+optional\"\n" + " },\n" + " \"rules\": {\n" + " \"type\": \"array\",\n" + @@ -3487,7 +3491,7 @@ func SwaggerJsonTemplate() string { " \"properties\": {\n" + " \"loadBalancer\": {\n" + " \"title\": \"LoadBalancer contains the current status of the load-balancer.\\n+optional\",\n" + - " \"$ref\": \"#/definitions/v1LoadBalancerStatus\"\n" + + " \"$ref\": \"#/definitions/v1IngressLoadBalancerStatus\"\n" + " }\n" + " }\n" + " },\n" + @@ -3513,18 +3517,18 @@ func SwaggerJsonTemplate() string { " \"title\": \"Maps a string key to a path within a volume.\",\n" + " \"properties\": {\n" + " \"key\": {\n" + - " \"description\": \"The key to project.\",\n" + + " \"description\": \"key is the key to project.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Key\"\n" + " },\n" + " \"mode\": {\n" + - " \"description\": \"Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + + " \"description\": \"mode is Optional: mode bits used to set permissions on this file.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nIf not specified, the volume defaultMode will be used.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"Mode\"\n" + " },\n" + " \"path\": {\n" + - " \"description\": \"The relative path of the file to map the key to.\\nMay not be an absolute path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\",\n" + + " \"description\": \"path is the relative path of the file to map the key to.\\nMay not be an absolute 
path.\\nMay not contain the path element '..'.\\nMay not start with the string '..'.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Path\"\n" + " }\n" + @@ -3587,10 +3591,26 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"postStart\": {\n" + - " \"$ref\": \"#/definitions/v1Handler\"\n" + + " \"$ref\": \"#/definitions/v1LifecycleHandler\"\n" + " },\n" + " \"preStop\": {\n" + - " \"$ref\": \"#/definitions/v1Handler\"\n" + + " \"$ref\": \"#/definitions/v1LifecycleHandler\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + + " \"v1LifecycleHandler\": {\n" + + " \"description\": \"LifecycleHandler defines a specific action that should be taken in a lifecycle\\nhook. One and only one of the fields, except TCPSocket must be specified.\",\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"exec\": {\n" + + " \"$ref\": \"#/definitions/v1ExecAction\"\n" + + " },\n" + + " \"httpGet\": {\n" + + " \"$ref\": \"#/definitions/v1HTTPGetAction\"\n" + + " },\n" + + " \"tcpSocket\": {\n" + + " \"$ref\": \"#/definitions/v1TCPSocketAction\"\n" + " }\n" + " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -3683,6 +3703,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n" + " },\n" + " \"v1MountPropagationMode\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"MountPropagationMode describes mount propagation.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -3693,17 +3714,17 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents an NFS mount that lasts the lifetime of a pod.\",\n" + " \"properties\": {\n" + " \"path\": {\n" + - " \"description\": \"Path that is exported by the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\",\n" + + " \"description\": \"path that is exported by the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Path\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"ReadOnly here will force\\nthe NFS export to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\\n+optional\",\n" + + " \"description\": \"readOnly here will force the NFS export to be mounted with read-only permissions.\\nDefaults to false.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"server\": {\n" + - " \"description\": \"Server is the hostname or IP address of the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\",\n" + + " \"description\": \"server is the hostname or IP address of the NFS server.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Server\"\n" + " }\n" + @@ -3728,6 +3749,11 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1NodeInclusionPolicy\": {\n" + + " \"description\": \"NodeInclusionPolicy defines the type of node inclusion policy\\n+enum\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1NodeSelector\": {\n" + " \"description\": \"A node selector represents the union of the results of one or more label queries\\nover a set 
of nodes; that is, it represents the OR of the selectors represented\\nby the node selector terms.\\n+structType=atomic\",\n" + " \"type\": \"object\",\n" + @@ -3744,7 +3770,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1NodeSelectorOperator\": {\n" + - " \"description\": \"A node selector operator is the set of operators that can be used in\\na node selector requirement.\",\n" + + " \"description\": \"A node selector operator is the set of operators that can be used in\\na node selector requirement.\\n+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -3794,6 +3820,11 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1OSName\": {\n" + + " \"type\": \"string\",\n" + + " \"title\": \"OSName is the set of OS'es that can be used in OS.\",\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1ObjectFieldSelector\": {\n" + " \"description\": \"+structType=atomic\",\n" + " \"type\": \"object\",\n" + @@ -3824,11 +3855,6 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-name\": \"Annotations\"\n" + " },\n" + - " \"clusterName\": {\n" + - " \"description\": \"The name of the cluster which the object belongs to.\\nThis is used to distinguish resources with same name and namespace in different clusters.\\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\\n+optional\",\n" + - " \"type\": \"string\",\n" + - " \"x-go-name\": \"ClusterName\"\n" + - " },\n" + " \"creationTimestamp\": {\n" + " \"$ref\": \"#/definitions/v1Time\"\n" + " },\n" + @@ -3850,7 +3876,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Finalizers\"\n" + " },\n" + " \"generateName\": {\n" + - " \"description\": \"GenerateName is an optional prefix, used by the server, to generate a unique\\nname ONLY IF the Name field has not been provided.\\nIf this field is used, the name returned to the client will be different\\nthan the name passed. This value will also be combined with a unique suffix.\\nThe provided value has the same validation rules as the Name field,\\nand may be truncated by the length of the suffix required to make the value\\nunique on the server.\\n\\nIf this field is specified and the generated name exists, the server will\\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\\nshould retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\\n+optional\",\n" + + " \"description\": \"GenerateName is an optional prefix, used by the server, to generate a unique\\nname ONLY IF the Name field has not been provided.\\nIf this field is used, the name returned to the client will be different\\nthan the name passed. 
This value will also be combined with a unique suffix.\\nThe provided value has the same validation rules as the Name field,\\nand may be truncated by the length of the suffix required to make the value\\nunique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"GenerateName\"\n" + " },\n" + @@ -3900,7 +3926,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"ResourceVersion\"\n" + " },\n" + " \"selfLink\": {\n" + - " \"description\": \"SelfLink is a URL representing this object.\\nPopulated by the system.\\nRead-only.\\n\\nDEPRECATED\\nKubernetes will stop propagating this field in 1.20 release and the field is planned\\nto be removed in 1.21 release.\\n+optional\",\n" + + " \"description\": \"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"SelfLink\"\n" + " },\n" + @@ -3920,7 +3946,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"APIVersion\"\n" + " },\n" + " \"blockOwnerDeletion\": {\n" + - " \"description\": \"If true, AND if the owner has the \\\"foregroundDeletion\\\" finalizer, then\\nthe owner cannot be deleted from the key-value store until this\\nreference is removed.\\nDefaults to false.\\nTo set this field, a user needs \\\"delete\\\" permission of the owner,\\notherwise 422 (Unprocessable Entity) will be returned.\\n+optional\",\n" + + " \"description\": \"If true, AND if the owner has the \\\"foregroundDeletion\\\" finalizer, then\\nthe owner cannot be deleted from the key-value store until this\\nreference is removed.\\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\\nDefaults to false.\\nTo set this field, a user needs \\\"delete\\\" permission of the owner,\\notherwise 422 (Unprocessable Entity) will be returned.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"BlockOwnerDeletion\"\n" + " },\n" + @@ -3946,6 +3972,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n" + " },\n" + " \"v1PersistentVolumeAccessMode\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -3954,7 +3981,7 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"accessModes\": {\n" + - " \"description\": \"AccessModes contains the desired access modes the volume should have.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\\n+optional\",\n" + + " \"description\": \"accessModes contains the desired access modes the volume should have.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1PersistentVolumeAccessMode\"\n" + @@ -3965,7 +3992,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1TypedLocalObjectReference\"\n" + " },\n" + " \"dataSourceRef\": {\n" + - " \"$ref\": \"#/definitions/v1TypedLocalObjectReference\"\n" + + " \"$ref\": \"#/definitions/v1TypedObjectReference\"\n" 
+ " },\n" + " \"resources\": {\n" + " \"$ref\": \"#/definitions/v1ResourceRequirements\"\n" + @@ -3974,7 +4001,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LabelSelector\"\n" + " },\n" + " \"storageClassName\": {\n" + - " \"description\": \"Name of the StorageClass required by the claim.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\\n+optional\",\n" + + " \"description\": \"storageClassName is the name of the StorageClass required by the claim.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"StorageClassName\"\n" + " },\n" + @@ -3982,7 +4009,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1PersistentVolumeMode\"\n" + " },\n" + " \"volumeName\": {\n" + - " \"description\": \"VolumeName is the binding reference to the PersistentVolume backing this claim.\\n+optional\",\n" + + " \"description\": \"volumeName is the binding reference to the PersistentVolume backing this claim.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeName\"\n" + " }\n" + @@ -4001,11 +4028,6 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-name\": \"Annotations\"\n" + " },\n" + - " \"clusterName\": {\n" + - " \"description\": \"The name of the cluster which the object belongs to.\\nThis is used to distinguish resources with same name and namespace in different clusters.\\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\\n+optional\",\n" + - " \"type\": \"string\",\n" + - " \"x-go-name\": \"ClusterName\"\n" + - " },\n" + " \"creationTimestamp\": {\n" + " \"$ref\": \"#/definitions/v1Time\"\n" + " },\n" + @@ -4027,7 +4049,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Finalizers\"\n" + " },\n" + " \"generateName\": {\n" + - " \"description\": \"GenerateName is an optional prefix, used by the server, to generate a unique\\nname ONLY IF the Name field has not been provided.\\nIf this field is used, the name returned to the client will be different\\nthan the name passed. This value will also be combined with a unique suffix.\\nThe provided value has the same validation rules as the Name field,\\nand may be truncated by the length of the suffix required to make the value\\nunique on the server.\\n\\nIf this field is specified and the generated name exists, the server will\\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\\nshould retry (optionally after the time indicated in the Retry-After header).\\n\\nApplied only if Name is not specified.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\\n+optional\",\n" + + " \"description\": \"GenerateName is an optional prefix, used by the server, to generate a unique\\nname ONLY IF the Name field has not been provided.\\nIf this field is used, the name returned to the client will be different\\nthan the name passed. 
This value will also be combined with a unique suffix.\\nThe provided value has the same validation rules as the Name field,\\nand may be truncated by the length of the suffix required to make the value\\nunique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified.\\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"GenerateName\"\n" + " },\n" + @@ -4077,7 +4099,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"ResourceVersion\"\n" + " },\n" + " \"selfLink\": {\n" + - " \"description\": \"SelfLink is a URL representing this object.\\nPopulated by the system.\\nRead-only.\\n\\nDEPRECATED\\nKubernetes will stop propagating this field in 1.20 release and the field is planned\\nto be removed in 1.21 release.\\n+optional\",\n" + + " \"description\": \"Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"SelfLink\"\n" + " },\n" + @@ -4096,12 +4118,12 @@ func SwaggerJsonTemplate() string { " \"title\": \"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.\",\n" + " \"properties\": {\n" + " \"claimName\": {\n" + - " \"description\": \"ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\",\n" + + " \"description\": \"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"ClaimName\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Will force the ReadOnly setting in VolumeMounts.\\nDefault false.\\n+optional\",\n" + + " \"description\": \"readOnly Will force the ReadOnly setting in VolumeMounts.\\nDefault false.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " }\n" + @@ -4109,6 +4131,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1PersistentVolumeMode\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -4118,12 +4141,12 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Photon Controller persistent disk resource.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\",\n" + + " \"description\": \"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
Implicitly inferred to be \\\"ext4\\\" if unspecified.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"pdID\": {\n" + - " \"description\": \"ID that identifies Photon Controller persistent disk\",\n" + + " \"description\": \"pdID is the ID that identifies Photon Controller persistent disk\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"PdID\"\n" + " }\n" + @@ -4164,7 +4187,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LabelSelector\"\n" + " },\n" + " \"namespaces\": {\n" + - " \"description\": \"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\"\\n+optional\",\n" + + " \"description\": \"namespaces specifies a static list of namespace names that the term applies to.\\nThe term is applied to the union of the namespaces listed in this field\\nand the ones selected by namespaceSelector.\\nnull or empty namespaces list and null namespaceSelector means \\\"this pod's namespace\\\".\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -4256,10 +4279,20 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1PodFSGroupChangePolicy\": {\n" + - " \"description\": \"PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume\\nwhen volume is mounted.\",\n" + + " \"description\": \"PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume\\nwhen volume is mounted.\\n+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1PodOS\": {\n" + + " \"type\": \"object\",\n" + + " \"title\": \"PodOS defines the OS parameters of a pod.\",\n" + + " \"properties\": {\n" + + " \"name\": {\n" + + " \"$ref\": \"#/definitions/v1OSName\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1PodReadinessGate\": {\n" + " \"description\": \"PodReadinessGate contains the reference to a pod condition\",\n" + " \"type\": \"object\",\n" + @@ -4270,13 +4303,41 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1PodResourceClaim\": {\n" + + " \"description\": \"It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.\\nContainers that need access to the ResourceClaim reference it with this name.\",\n" + + " \"type\": \"object\",\n" + + " \"title\": \"PodResourceClaim references exactly one ResourceClaim through a ClaimSource.\",\n" + + " \"properties\": {\n" + + " \"name\": {\n" + + " \"description\": \"Name uniquely identifies this resource claim inside the pod.\\nThis must be a DNS_LABEL.\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"Name\"\n" + + " },\n" + + " \"source\": {\n" + + " \"$ref\": \"#/definitions/v1ClaimSource\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + + " \"v1PodSchedulingGate\": {\n" + + " \"type\": \"object\",\n" + + " \"title\": \"PodSchedulingGate is associated to a Pod to guard its scheduling.\",\n" + + " \"properties\": {\n" + + " \"name\": {\n" + + " \"description\": \"Name of the scheduling gate.\\nEach scheduling gate must have a unique name field.\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": 
\"Name\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1PodSecurityContext\": {\n" + " \"description\": \"Some fields are also present in container.securityContext. Field values of\\ncontainer.securityContext take precedence over field values of PodSecurityContext.\",\n" + " \"type\": \"object\",\n" + " \"title\": \"PodSecurityContext holds pod-level security attributes and common container settings.\",\n" + " \"properties\": {\n" + " \"fsGroup\": {\n" + - " \"description\": \"A special supplemental group that applies to all containers in a pod.\\nSome volume types allow the Kubelet to change the ownership of that volume\\nto be owned by the pod:\\n\\n1. The owning GID will be the FSGroup\\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\\n3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\\n+optional\",\n" + + " \"description\": \"A special supplemental group that applies to all containers in a pod.\\nSome volume types allow the Kubelet to change the ownership of that volume\\nto be owned by the pod:\\n\\n1. The owning GID will be the FSGroup\\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\\n3. The permission bits are OR'd with rw-rw----\\n\\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int64\",\n" + " \"x-go-name\": \"FSGroup\"\n" + @@ -4285,7 +4346,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1PodFSGroupChangePolicy\"\n" + " },\n" + " \"runAsGroup\": {\n" + - " \"description\": \"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\n+optional\",\n" + + " \"description\": \"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int64\",\n" + " \"x-go-name\": \"RunAsGroup\"\n" + @@ -4296,7 +4357,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"RunAsNonRoot\"\n" + " },\n" + " \"runAsUser\": {\n" + - " \"description\": \"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in SecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\n+optional\",\n" + + " \"description\": \"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in SecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence\\nfor that container.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int64\",\n" + " \"x-go-name\": \"RunAsUser\"\n" + @@ -4308,7 +4369,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1SeccompProfile\"\n" + " },\n" + " \"supplementalGroups\": {\n" + - " \"description\": \"A list of groups applied to the first process run in each container, in addition\\nto the container's primary GID. If unspecified, no groups will be added to\\nany container.\\n+optional\",\n" + + " \"description\": \"A list of groups applied to the first process run in each container, in addition\\nto the container's primary GID, the fsGroup (if specified), and group memberships\\ndefined in the container image for the uid of the container process. If unspecified,\\nno additional groups are added to any container. Note that group memberships\\ndefined in the container image for the uid of the container process are still effective,\\neven if they are not included in this list.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"integer\",\n" + @@ -4317,7 +4378,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"SupplementalGroups\"\n" + " },\n" + " \"sysctls\": {\n" + - " \"description\": \"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\n+optional\",\n" + + " \"description\": \"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\\nsysctls (by the container runtime) might fail to launch.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1Sysctl\"\n" + @@ -4368,7 +4429,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"EnableServiceLinks\"\n" + " },\n" + " \"ephemeralContainers\": {\n" + - " \"description\": \"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\\npod to perform user-initiated actions such as debugging. This list cannot be specified when\\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\\nThis field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\\n+optional\\n+patchMergeKey=name\\n+patchStrategy=merge\",\n" + + " \"description\": \"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\\npod to perform user-initiated actions such as debugging. This list cannot be specified when\\ncreating a pod, and it cannot be modified by updating the pod spec. 
In order to add an\\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\\n+optional\\n+patchMergeKey=name\\n+patchStrategy=merge\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1EphemeralContainer\"\n" + @@ -4398,13 +4459,18 @@ func SwaggerJsonTemplate() string { " \"type\": \"boolean\",\n" + " \"x-go-name\": \"HostPID\"\n" + " },\n" + + " \"hostUsers\": {\n" + + " \"description\": \"Use the host's user namespace.\\nOptional: Default to true.\\nIf set to true or not present, the pod will be run in the host user namespace, useful\\nfor when the pod needs a feature only available to the host user namespace, such as\\nloading a kernel module with CAP_SYS_MODULE.\\nWhen set to false, a new userns is created for the pod. Setting false is useful for\\nmitigating container breakout vulnerabilities even allowing users to run their\\ncontainers as root without actually having root privileges on the host.\\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\\n+k8s:conversion-gen=false\\n+optional\",\n" + + " \"type\": \"boolean\",\n" + + " \"x-go-name\": \"HostUsers\"\n" + + " },\n" + " \"hostname\": {\n" + " \"description\": \"Specifies the hostname of the Pod\\nIf not specified, the pod's hostname will be set to a system-defined value.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Hostname\"\n" + " },\n" + " \"imagePullSecrets\": {\n" + - " \"description\": \"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use. For example,\\nin the case of docker, only DockerConfig type secrets are honored.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\\n+optional\\n+patchMergeKey=name\\n+patchStrategy=merge\",\n" + + " \"description\": \"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\\nIf specified, these secrets will be passed to individual puller implementations for them to use.\\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\\n+optional\\n+patchMergeKey=name\\n+patchStrategy=merge\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + @@ -4432,6 +4498,9 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-name\": \"NodeSelector\"\n" + " },\n" + + " \"os\": {\n" + + " \"$ref\": \"#/definitions/v1PodOS\"\n" + + " },\n" + " \"overhead\": {\n" + " \"$ref\": \"#/definitions/v1ResourceList\"\n" + " },\n" + @@ -4457,11 +4526,19 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-name\": \"ReadinessGates\"\n" + " },\n" + + " \"resourceClaims\": {\n" + + " \"description\": \"ResourceClaims defines which ResourceClaims must be allocated\\nand reserved before the Pod is allowed to start. 
The resources\\nwill be made available to those containers which consume them\\nby name.\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\nThis field is immutable.\\n\\n+patchMergeKey=name\\n+patchStrategy=merge,retainKeys\\n+listType=map\\n+listMapKey=name\\n+featureGate=DynamicResourceAllocation\\n+optional\",\n" + + " \"type\": \"array\",\n" + + " \"items\": {\n" + + " \"$ref\": \"#/definitions/v1PodResourceClaim\"\n" + + " },\n" + + " \"x-go-name\": \"ResourceClaims\"\n" + + " },\n" + " \"restartPolicy\": {\n" + " \"$ref\": \"#/definitions/v1RestartPolicy\"\n" + " },\n" + " \"runtimeClassName\": {\n" + - " \"description\": \"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\\nIf unset or empty, the \\\"legacy\\\" RuntimeClass will be used, which is an implicit class with an\\nempty definition that uses the default runtime handler.\\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class\\nThis is a beta feature as of Kubernetes v1.14.\\n+optional\",\n" + + " \"description\": \"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\\nIf unset or empty, the \\\"legacy\\\" RuntimeClass will be used, which is an implicit class with an\\nempty definition that uses the default runtime handler.\\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"RuntimeClassName\"\n" + " },\n" + @@ -4470,6 +4547,14 @@ func SwaggerJsonTemplate() string { " \"type\": \"string\",\n" + " \"x-go-name\": \"SchedulerName\"\n" + " },\n" + + " \"schedulingGates\": {\n" + + " \"description\": \"SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\\nMore info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.\\n\\nThis is an alpha-level feature enabled by PodSchedulingReadiness feature gate.\\n+optional\\n+patchMergeKey=name\\n+patchStrategy=merge\\n+listType=map\\n+listMapKey=name\",\n" + + " \"type\": \"array\",\n" + + " \"items\": {\n" + + " \"$ref\": \"#/definitions/v1PodSchedulingGate\"\n" + + " },\n" + + " \"x-go-name\": \"SchedulingGates\"\n" + + " },\n" + " \"securityContext\": {\n" + " \"$ref\": \"#/definitions/v1PodSecurityContext\"\n" + " },\n" + @@ -4554,17 +4639,17 @@ func SwaggerJsonTemplate() string { " \"title\": \"PortworxVolumeSource represents a Portworx volume resource.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"FSType represents the filesystem type to mount\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\",\n" + + " \"description\": \"fSType represents the filesystem type to mount\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"volumeID\": {\n" + - " \"description\": \"VolumeID uniquely identifies a Portworx volume\",\n" + + " \"description\": \"volumeID uniquely identifies a Portworx volume\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeID\"\n" + " }\n" + @@ -4572,6 +4657,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1PreemptionPolicy\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"PreemptionPolicy describes a policy for if/when to preempt a pod.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -4605,6 +4691,9 @@ func SwaggerJsonTemplate() string { " \"format\": \"int32\",\n" + " \"x-go-name\": \"FailureThreshold\"\n" + " },\n" + + " \"grpc\": {\n" + + " \"$ref\": \"#/definitions/v1GRPCAction\"\n" + + " },\n" + " \"httpGet\": {\n" + " \"$ref\": \"#/definitions/v1HTTPGetAction\"\n" + " },\n" + @@ -4645,6 +4734,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1ProcMountType\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -4653,13 +4743,13 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"defaultMode\": {\n" + - " \"description\": \"Mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + + " \"description\": \"defaultMode are the mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"DefaultMode\"\n" + " },\n" + " \"sources\": {\n" + - " \"description\": \"list of volume projections\\n+optional\",\n" + + " \"description\": \"sources is the list of volume projections\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1VolumeProjection\"\n" + @@ -4670,12 +4760,13 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1Protocol\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"Protocol defines network protocols supported for things like container ports.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1PullPolicy\": {\n" + - " \"description\": \"PullPolicy describes a policy for if/when to pull a container image\",\n" + + " \"description\": \"PullPolicy describes a policy for if/when to pull a container image\\n+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -4685,32 +4776,32 
@@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Quobyte mount that lasts the lifetime of a pod.\",\n" + " \"properties\": {\n" + " \"group\": {\n" + - " \"description\": \"Group to map volume access to\\nDefault is no group\\n+optional\",\n" + + " \"description\": \"group to map volume access to\\nDefault is no group\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Group\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.\\nDefaults to false.\\n+optional\",\n" + + " \"description\": \"readOnly here will force the Quobyte volume to be mounted with read-only permissions.\\nDefaults to false.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + " \"registry\": {\n" + - " \"description\": \"Registry represents a single or multiple Quobyte Registry services\\nspecified as a string as host:port pair (multiple entries are separated with commas)\\nwhich acts as the central registry for volumes\",\n" + + " \"description\": \"registry represents a single or multiple Quobyte Registry services\\nspecified as a string as host:port pair (multiple entries are separated with commas)\\nwhich acts as the central registry for volumes\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Registry\"\n" + " },\n" + " \"tenant\": {\n" + - " \"description\": \"Tenant owning the given Quobyte volume in the Backend\\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\\n+optional\",\n" + + " \"description\": \"tenant owning the given Quobyte volume in the Backend\\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Tenant\"\n" + " },\n" + " \"user\": {\n" + - " \"description\": \"User to map volume access to\\nDefaults to serivceaccount user\\n+optional\",\n" + + " \"description\": \"user to map volume access to\\nDefaults to serivceaccount user\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"User\"\n" + " },\n" + " \"volume\": {\n" + - " \"description\": \"Volume is a string that references an already created Quobyte volume by name.\",\n" + + " \"description\": \"volume is a string that references an already created Quobyte volume by name.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Volume\"\n" + " }\n" + @@ -4723,22 +4814,22 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a Rados Block Device mount that lasts the lifetime of a pod.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type of the volume that you want to mount.\\nTip: Ensure that the filesystem type is supported by the host operating system.\\nExamples: \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
Implicitly inferred to be \\\"ext4\\\" if unspecified.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\\nTODO: how do we prevent errors in the filesystem from compromising the machine\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"image\": {\n" + - " \"description\": \"The rados image name.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\",\n" + + " \"description\": \"image is the rados image name.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"RBDImage\"\n" + " },\n" + " \"keyring\": {\n" + - " \"description\": \"Keyring is the path to key ring for RBDUser.\\nDefault is /etc/ceph/keyring.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"keyring is the path to key ring for RBDUser.\\nDefault is /etc/ceph/keyring.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Keyring\"\n" + " },\n" + " \"monitors\": {\n" + - " \"description\": \"A collection of Ceph monitors.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\",\n" + + " \"description\": \"monitors is a collection of Ceph monitors.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -4746,12 +4837,12 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"CephMonitors\"\n" + " },\n" + " \"pool\": {\n" + - " \"description\": \"The rados pool name.\\nDefault is rbd.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"pool is the rados pool name.\\nDefault is rbd.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"RBDPool\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"ReadOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"readOnly here will force the ReadOnly setting in VolumeMounts.\\nDefaults to false.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + @@ -4759,13 +4850,25 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"user\": {\n" + - " \"description\": \"The rados user name.\\nDefault is admin.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + + " \"description\": \"user is the rados user name.\\nDefault is admin.\\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"RadosUser\"\n" + " }\n" + " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1ResourceClaim\": {\n" + + " \"type\": \"object\",\n" + + " \"title\": \"ResourceClaim references one entry in PodSpec.ResourceClaims.\",\n" + + " \"properties\": {\n" + + " \"name\": {\n" + + " \"description\": \"Name must match the name of one entry in pod.spec.resourceClaims of\\nthe Pod where this field is used. 
It makes that resource available\\ninside a container.\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"Name\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1ResourceFieldSelector\": {\n" + " \"description\": \"ResourceFieldSelector represents container resources (cpu, memory) and their output format\\n+structType=atomic\",\n" + " \"type\": \"object\",\n" + @@ -4798,6 +4901,14 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"title\": \"ResourceRequirements describes the compute resource requirements.\",\n" + " \"properties\": {\n" + + " \"claims\": {\n" + + " \"description\": \"Claims lists the names of resources, defined in spec.resourceClaims,\\nthat are used by this container.\\n\\nThis is an alpha field and requires enabling the\\nDynamicResourceAllocation feature gate.\\n\\nThis field is immutable. It can only be set for containers.\\n\\n+listType=map\\n+listMapKey=name\\n+featureGate=DynamicResourceAllocation\\n+optional\",\n" + + " \"type\": \"array\",\n" + + " \"items\": {\n" + + " \"$ref\": \"#/definitions/v1ResourceClaim\"\n" + + " },\n" + + " \"x-go-name\": \"Claims\"\n" + + " },\n" + " \"limits\": {\n" + " \"$ref\": \"#/definitions/v1ResourceList\"\n" + " },\n" + @@ -4808,7 +4919,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1RestartPolicy\": {\n" + - " \"description\": \"Only one of the following restart policies may be specified.\\nIf none of the following policies is specified, the default one\\nis RestartPolicyAlways.\",\n" + + " \"description\": \"Only one of the following restart policies may be specified.\\nIf none of the following policies is specified, the default one\\nis RestartPolicyAlways.\\n+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"RestartPolicy describes how the container should be restarted.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -4845,22 +4956,22 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\".\\nDefault is \\\"xfs\\\".\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\".\\nDefault is \\\"xfs\\\".\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"gateway\": {\n" + - " \"description\": \"The host address of the ScaleIO API Gateway.\",\n" + + " \"description\": \"gateway is the host address of the ScaleIO API Gateway.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Gateway\"\n" + " },\n" + " \"protectionDomain\": {\n" + - " \"description\": \"The name of the ScaleIO Protection Domain for the configured storage.\\n+optional\",\n" + + " \"description\": \"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"ProtectionDomain\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly Defaults to false (read/write). 
ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + @@ -4868,27 +4979,27 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"sslEnabled\": {\n" + - " \"description\": \"Flag to enable/disable SSL communication with Gateway, default false\\n+optional\",\n" + + " \"description\": \"sslEnabled Flag enable/disable SSL communication with Gateway, default false\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"SSLEnabled\"\n" + " },\n" + " \"storageMode\": {\n" + - " \"description\": \"Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\\nDefault is ThinProvisioned.\\n+optional\",\n" + + " \"description\": \"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\\nDefault is ThinProvisioned.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"StorageMode\"\n" + " },\n" + " \"storagePool\": {\n" + - " \"description\": \"The ScaleIO Storage Pool associated with the protection domain.\\n+optional\",\n" + + " \"description\": \"storagePool is the ScaleIO Storage Pool associated with the protection domain.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"StoragePool\"\n" + " },\n" + " \"system\": {\n" + - " \"description\": \"The name of the storage system as configured in ScaleIO.\",\n" + + " \"description\": \"system is the name of the storage system as configured in ScaleIO.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"System\"\n" + " },\n" + " \"volumeName\": {\n" + - " \"description\": \"The name of a volume already created in the ScaleIO system\\nthat is associated with this volume source.\",\n" + + " \"description\": \"volumeName is the name of a volume already created in the ScaleIO system\\nthat is associated with this volume source.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeName\"\n" + " }\n" + @@ -4912,6 +5023,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1SeccompProfileType\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"SeccompProfileType defines the supported seccomp profile types.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -4963,7 +5075,7 @@ func SwaggerJsonTemplate() string { " \"title\": \"Adapts a secret into a projected volume.\",\n" + " \"properties\": {\n" + " \"items\": {\n" + - " \"description\": \"If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\\n+optional\",\n" + + " \"description\": \"items if unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. 
Paths must be\\nrelative and may not contain the '..' path or start with '..'.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1KeyToPath\"\n" + @@ -4976,7 +5088,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Name\"\n" + " },\n" + " \"optional\": {\n" + - " \"description\": \"Specify whether the Secret or its key must be defined\\n+optional\",\n" + + " \"description\": \"optional field specify whether the Secret or its key must be defined\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"Optional\"\n" + " }\n" + @@ -4989,13 +5101,13 @@ func SwaggerJsonTemplate() string { " \"title\": \"Adapts a Secret into a volume.\",\n" + " \"properties\": {\n" + " \"defaultMode\": {\n" + - " \"description\": \"Optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values\\nfor mode bits. Defaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + + " \"description\": \"defaultMode is Optional: mode bits used to set permissions on created files by default.\\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\\nYAML accepts both octal and decimal values, JSON requires decimal values\\nfor mode bits. Defaults to 0644.\\nDirectories within the path are not affected by this setting.\\nThis might be in conflict with other options that affect the file\\nmode, like fsGroup, and the result can be other mode bits set.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"DefaultMode\"\n" + " },\n" + " \"items\": {\n" + - " \"description\": \"If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' path or start with '..'.\\n+optional\",\n" + + " \"description\": \"items If unspecified, each key-value pair in the Data field of the referenced\\nSecret will be projected into the volume as a file whose name is the\\nkey and content is the value. If specified, the listed keys will be\\nprojected into the specified paths, and unlisted keys will not be\\npresent. If a key is specified which is not present in the Secret,\\nthe volume setup will error unless it is marked optional. Paths must be\\nrelative and may not contain the '..' 
path or start with '..'.\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"$ref\": \"#/definitions/v1KeyToPath\"\n" + @@ -5003,12 +5115,12 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"Items\"\n" + " },\n" + " \"optional\": {\n" + - " \"description\": \"Specify whether the Secret or its keys must be defined\\n+optional\",\n" + + " \"description\": \"optional field specify whether the Secret or its keys must be defined\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"Optional\"\n" + " },\n" + " \"secretName\": {\n" + - " \"description\": \"Name of the secret in the pod's namespace to use.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\\n+optional\",\n" + + " \"description\": \"secretName is the name of the secret in the pod's namespace to use.\\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"SecretName\"\n" + " }\n" + @@ -5021,7 +5133,7 @@ func SwaggerJsonTemplate() string { " \"title\": \"SecurityContext holds security configuration that will be applied to a container.\",\n" + " \"properties\": {\n" + " \"allowPrivilegeEscalation\": {\n" + - " \"description\": \"AllowPrivilegeEscalation controls whether a process can gain more\\nprivileges than its parent process. This bool directly controls if\\nthe no_new_privs flag will be set on the container process.\\nAllowPrivilegeEscalation is true always when the container is:\\n1) run as Privileged\\n2) has CAP_SYS_ADMIN\\n+optional\",\n" + + " \"description\": \"AllowPrivilegeEscalation controls whether a process can gain more\\nprivileges than its parent process. This bool directly controls if\\nthe no_new_privs flag will be set on the container process.\\nAllowPrivilegeEscalation is true always when the container is:\\n1) run as Privileged\\n2) has CAP_SYS_ADMIN\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"AllowPrivilegeEscalation\"\n" + " },\n" + @@ -5029,7 +5141,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1Capabilities\"\n" + " },\n" + " \"privileged\": {\n" + - " \"description\": \"Run container in privileged mode.\\nProcesses in privileged containers are essentially equivalent to root on the host.\\nDefaults to false.\\n+optional\",\n" + + " \"description\": \"Run container in privileged mode.\\nProcesses in privileged containers are essentially equivalent to root on the host.\\nDefaults to false.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"Privileged\"\n" + " },\n" + @@ -5037,12 +5149,12 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1ProcMountType\"\n" + " },\n" + " \"readOnlyRootFilesystem\": {\n" + - " \"description\": \"Whether this container has a read-only root filesystem.\\nDefault is false.\\n+optional\",\n" + + " \"description\": \"Whether this container has a read-only root filesystem.\\nDefault is false.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnlyRootFilesystem\"\n" + " },\n" + " \"runAsGroup\": {\n" + - " \"description\": \"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\n+optional\",\n" + + " \"description\": \"The GID to run the entrypoint of the container process.\\nUses runtime default if unset.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int64\",\n" + " \"x-go-name\": \"RunAsGroup\"\n" + @@ -5053,7 +5165,7 @@ func SwaggerJsonTemplate() string { " \"x-go-name\": \"RunAsNonRoot\"\n" + " },\n" + " \"runAsUser\": {\n" + - " \"description\": \"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\n+optional\",\n" + + " \"description\": \"The UID to run the entrypoint of the container process.\\nDefaults to user specified in image metadata if unspecified.\\nMay also be set in PodSecurityContext. If set in both SecurityContext and\\nPodSecurityContext, the value specified in SecurityContext takes precedence.\\nNote that this field cannot be set when spec.os.name is windows.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int64\",\n" + " \"x-go-name\": \"RunAsUser\"\n" + @@ -5093,18 +5205,18 @@ func SwaggerJsonTemplate() string { " \"type\": \"object\",\n" + " \"properties\": {\n" + " \"audience\": {\n" + - " \"description\": \"Audience is the intended audience of the token. A recipient of a token\\nmust identify itself with an identifier specified in the audience of the\\ntoken, and otherwise should reject the token. The audience defaults to the\\nidentifier of the apiserver.\\n+optional\",\n" + + " \"description\": \"audience is the intended audience of the token. A recipient of a token\\nmust identify itself with an identifier specified in the audience of the\\ntoken, and otherwise should reject the token. The audience defaults to the\\nidentifier of the apiserver.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Audience\"\n" + " },\n" + " \"expirationSeconds\": {\n" + - " \"description\": \"ExpirationSeconds is the requested duration of validity of the service\\naccount token. As the token approaches expiration, the kubelet volume\\nplugin will proactively rotate the service account token. The kubelet will\\nstart trying to rotate the token if the token is older than 80 percent of\\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\\nand must be at least 10 minutes.\\n+optional\",\n" + + " \"description\": \"expirationSeconds is the requested duration of validity of the service\\naccount token. As the token approaches expiration, the kubelet volume\\nplugin will proactively rotate the service account token. 
The kubelet will\\nstart trying to rotate the token if the token is older than 80 percent of\\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\\nand must be at least 10 minutes.\\n+optional\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int64\",\n" + " \"x-go-name\": \"ExpirationSeconds\"\n" + " },\n" + " \"path\": {\n" + - " \"description\": \"Path is the path relative to the mount point of the file to project the\\ntoken into.\",\n" + + " \"description\": \"path is the path relative to the mount point of the file to project the\\ntoken into.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Path\"\n" + " }\n" + @@ -5132,7 +5244,7 @@ func SwaggerJsonTemplate() string { " \"properties\": {\n" + " \"appProtocol\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"The application protocol for this port.\\nThis field follows standard Kubernetes label syntax.\\nUn-prefixed names are reserved for IANA standard service names (as per\\nRFC-6335 and http://www.iana.org/assignments/service-names).\\nNon-standard protocols should use prefixed names such as\\nmycompany.com/my-custom-protocol.\\n+optional\"\n" + + " \"title\": \"The application protocol for this port.\\nThis field follows standard Kubernetes label syntax.\\nUn-prefixed names are reserved for IANA standard service names (as per\\nRFC-6335 and https://www.iana.org/assignments/service-names).\\nNon-standard protocols should use prefixed names such as\\nmycompany.com/my-custom-protocol.\\n+optional\"\n" + " },\n" + " \"name\": {\n" + " \"type\": \"string\",\n" + @@ -5164,14 +5276,14 @@ func SwaggerJsonTemplate() string { " \"properties\": {\n" + " \"allocateLoadBalancerNodePorts\": {\n" + " \"type\": \"boolean\",\n" + - " \"title\": \"allocateLoadBalancerNodePorts defines if NodePorts will be automatically\\nallocated for services with type LoadBalancer. Default is \\\"true\\\". It\\nmay be set to \\\"false\\\" if the cluster load-balancer does not rely on\\nNodePorts. If the caller requests specific NodePorts (by specifying a\\nvalue), those requests will be respected, regardless of this field.\\nThis field may only be set for services with type LoadBalancer and will\\nbe cleared if the type is changed to any other type.\\nThis field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.\\n+featureGate=ServiceLBNodePortControl\\n+optional\"\n" + + " \"title\": \"allocateLoadBalancerNodePorts defines if NodePorts will be automatically\\nallocated for services with type LoadBalancer. Default is \\\"true\\\". It\\nmay be set to \\\"false\\\" if the cluster load-balancer does not rely on\\nNodePorts. If the caller requests specific NodePorts (by specifying a\\nvalue), those requests will be respected, regardless of this field.\\nThis field may only be set for services with type LoadBalancer and will\\nbe cleared if the type is changed to any other type.\\n+optional\"\n" + " },\n" + " \"clusterIP\": {\n" + " \"type\": \"string\",\n" + " \"title\": \"clusterIP is the IP address of the service and is usually assigned\\nrandomly. If an address is specified manually, is in-range (as per\\nsystem configuration), and is not in use, it will be allocated to the\\nservice; otherwise creation of the service will fail. 
This field may not\\nbe changed through updates unless the type field is also being changed\\nto ExternalName (which requires this field to be blank) or the type\\nfield is being changed from ExternalName (in which case this field may\\noptionally be specified, as describe above). Valid values are \\\"None\\\",\\nempty string (\\\"\\\"), or a valid IP address. Setting this to \\\"None\\\" makes a\\n\\\"headless service\\\" (no virtual IP), which is useful when direct endpoint\\nconnections are preferred and proxying is not required. Only applies to\\ntypes ClusterIP, NodePort, and LoadBalancer. If this field is specified\\nwhen creating a Service of type ExternalName, creation will fail. This\\nfield will be wiped when updating a Service to type ExternalName.\\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies\\n+optional\"\n" + " },\n" + " \"clusterIPs\": {\n" + - " \"description\": \"ClusterIPs is a list of IP addresses assigned to this service, and are\\nusually assigned randomly. If an address is specified manually, is\\nin-range (as per system configuration), and is not in use, it will be\\nallocated to the service; otherwise creation of the service will fail.\\nThis field may not be changed through updates unless the type field is\\nalso being changed to ExternalName (which requires this field to be\\nempty) or the type field is being changed from ExternalName (in which\\ncase this field may optionally be specified, as describe above). Valid\\nvalues are \\\"None\\\", empty string (\\\"\\\"), or a valid IP address. Setting\\nthis to \\\"None\\\" makes a \\\"headless service\\\" (no virtual IP), which is\\nuseful when direct endpoint connections are preferred and proxying is\\nnot required. Only applies to types ClusterIP, NodePort, and\\nLoadBalancer. If this field is specified when creating a Service of type\\nExternalName, creation will fail. This field will be wiped when updating\\na Service to type ExternalName. If this field is not specified, it will\\nbe initialized from the clusterIP field. If this field is specified,\\nclients must ensure that clusterIPs[0] and clusterIP have the same\\nvalue.\\n\\nUnless the \\\"IPv6DualStack\\\" feature gate is enabled, this field is\\nlimited to one value, which must be the same as the clusterIP field. If\\nthe feature gate is enabled, this field may hold a maximum of two\\nentries (dual-stack IPs, in either order). These IPs must correspond to\\nthe values of the ipFamilies field. Both clusterIPs and ipFamilies are\\ngoverned by the ipFamilyPolicy field.\\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies\\n+listType=atomic\\n+optional\",\n" + + " \"description\": \"ClusterIPs is a list of IP addresses assigned to this service, and are\\nusually assigned randomly. If an address is specified manually, is\\nin-range (as per system configuration), and is not in use, it will be\\nallocated to the service; otherwise creation of the service will fail.\\nThis field may not be changed through updates unless the type field is\\nalso being changed to ExternalName (which requires this field to be\\nempty) or the type field is being changed from ExternalName (in which\\ncase this field may optionally be specified, as describe above). Valid\\nvalues are \\\"None\\\", empty string (\\\"\\\"), or a valid IP address. 
Setting\\nthis to \\\"None\\\" makes a \\\"headless service\\\" (no virtual IP), which is\\nuseful when direct endpoint connections are preferred and proxying is\\nnot required. Only applies to types ClusterIP, NodePort, and\\nLoadBalancer. If this field is specified when creating a Service of type\\nExternalName, creation will fail. This field will be wiped when updating\\na Service to type ExternalName. If this field is not specified, it will\\nbe initialized from the clusterIP field. If this field is specified,\\nclients must ensure that clusterIPs[0] and clusterIP have the same\\nvalue.\\n\\nThis field may hold a maximum of two entries (dual-stack IPs, in either order).\\nThese IPs must correspond to the values of the ipFamilies field. Both\\nclusterIPs and ipFamilies are governed by the ipFamilyPolicy field.\\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies\\n+listType=atomic\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -5190,19 +5302,19 @@ func SwaggerJsonTemplate() string { " },\n" + " \"externalTrafficPolicy\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"externalTrafficPolicy denotes if this Service desires to route external\\ntraffic to node-local or cluster-wide endpoints. \\\"Local\\\" preserves the\\nclient source IP and avoids a second hop for LoadBalancer and Nodeport\\ntype services, but risks potentially imbalanced traffic spreading.\\n\\\"Cluster\\\" obscures the client source IP and may cause a second hop to\\nanother node, but should have good overall load-spreading.\\n+optional\"\n" + + " \"title\": \"externalTrafficPolicy describes how nodes distribute service traffic they\\nreceive on one of the Service's \\\"externally-facing\\\" addresses (NodePorts,\\nExternalIPs, and LoadBalancer IPs). If set to \\\"Local\\\", the proxy will configure\\nthe service in a way that assumes that external load balancers will take care\\nof balancing the service traffic between nodes, and so each node will deliver\\ntraffic only to the node-local endpoints of the service, without masquerading\\nthe client source IP. (Traffic mistakenly sent to a node with no endpoints will\\nbe dropped.) The default value, \\\"Cluster\\\", uses the standard behavior of\\nrouting to all endpoints evenly (possibly modified by topology and other\\nfeatures). Note that traffic sent to an External IP or LoadBalancer IP from\\nwithin the cluster will always get \\\"Cluster\\\" semantics, but clients sending to\\na NodePort from within the cluster may need to take traffic policy into account\\nwhen picking a node.\\n+optional\"\n" + " },\n" + " \"healthCheckNodePort\": {\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + - " \"title\": \"healthCheckNodePort specifies the healthcheck nodePort for the service.\\nThis only applies when type is set to LoadBalancer and\\nexternalTrafficPolicy is set to Local. If a value is specified, is\\nin-range, and is not in use, it will be used. If not specified, a value\\nwill be automatically allocated. External systems (e.g. load-balancers)\\ncan use this port to determine if a given node holds endpoints for this\\nservice or not. If this field is specified when creating a Service\\nwhich does not need it, creation will fail. This field will be wiped\\nwhen updating a Service to no longer need it (e.g. 
changing type).\\n+optional\"\n" + + " \"title\": \"healthCheckNodePort specifies the healthcheck nodePort for the service.\\nThis only applies when type is set to LoadBalancer and\\nexternalTrafficPolicy is set to Local. If a value is specified, is\\nin-range, and is not in use, it will be used. If not specified, a value\\nwill be automatically allocated. External systems (e.g. load-balancers)\\ncan use this port to determine if a given node holds endpoints for this\\nservice or not. If this field is specified when creating a Service\\nwhich does not need it, creation will fail. This field will be wiped\\nwhen updating a Service to no longer need it (e.g. changing type).\\nThis field cannot be updated once set.\\n+optional\"\n" + " },\n" + " \"internalTrafficPolicy\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"InternalTrafficPolicy specifies if the cluster internal traffic\\nshould be routed to all endpoints or node-local endpoints only.\\n\\\"Cluster\\\" routes internal traffic to a Service to all endpoints.\\n\\\"Local\\\" routes traffic to node-local endpoints only, traffic is\\ndropped if no node-local endpoints are ready.\\nThe default value is \\\"Cluster\\\".\\n+featureGate=ServiceInternalTrafficPolicy\\n+optional\"\n" + + " \"title\": \"InternalTrafficPolicy describes how nodes distribute service traffic they\\nreceive on the ClusterIP. If set to \\\"Local\\\", the proxy will assume that pods\\nonly want to talk to endpoints of the service on the same node as the pod,\\ndropping the traffic if there are no local endpoints. The default value,\\n\\\"Cluster\\\", uses the standard behavior of routing to all endpoints evenly\\n(possibly modified by topology and other features).\\n+optional\"\n" + " },\n" + " \"ipFamilies\": {\n" + - " \"description\": \"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this\\nservice, and is gated by the \\\"IPv6DualStack\\\" feature gate. This field\\nis usually assigned automatically based on cluster configuration and the\\nipFamilyPolicy field. If this field is specified manually, the requested\\nfamily is available in the cluster, and ipFamilyPolicy allows it, it\\nwill be used; otherwise creation of the service will fail. This field\\nis conditionally mutable: it allows for adding or removing a secondary\\nIP family, but it does not allow changing the primary IP family of the\\nService. Valid values are \\\"IPv4\\\" and \\\"IPv6\\\". This field only applies\\nto Services of types ClusterIP, NodePort, and LoadBalancer, and does\\napply to \\\"headless\\\" services. This field will be wiped when updating a\\nService to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in\\neither order). These families must correspond to the values of the\\nclusterIPs field, if specified. Both clusterIPs and ipFamilies are\\ngoverned by the ipFamilyPolicy field.\\n+listType=atomic\\n+optional\",\n" + + " \"description\": \"IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this\\nservice. This field is usually assigned automatically based on cluster\\nconfiguration and the ipFamilyPolicy field. If this field is specified\\nmanually, the requested family is available in the cluster,\\nand ipFamilyPolicy allows it, it will be used; otherwise creation of\\nthe service will fail. This field is conditionally mutable: it allows\\nfor adding or removing a secondary IP family, but it does not allow\\nchanging the primary IP family of the Service. 
Valid values are \\\"IPv4\\\"\\nand \\\"IPv6\\\". This field only applies to Services of types ClusterIP,\\nNodePort, and LoadBalancer, and does apply to \\\"headless\\\" services.\\nThis field will be wiped when updating a Service to type ExternalName.\\n\\nThis field may hold a maximum of two entries (dual-stack families, in\\neither order). These families must correspond to the values of the\\nclusterIPs field, if specified. Both clusterIPs and ipFamilies are\\ngoverned by the ipFamilyPolicy field.\\n+listType=atomic\\n+optional\",\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + " \"type\": \"string\"\n" + @@ -5210,15 +5322,15 @@ func SwaggerJsonTemplate() string { " },\n" + " \"ipFamilyPolicy\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"IPFamilyPolicy represents the dual-stack-ness requested or required by\\nthis Service, and is gated by the \\\"IPv6DualStack\\\" feature gate. If\\nthere is no value provided, then this field will be set to SingleStack.\\nServices can be \\\"SingleStack\\\" (a single IP family), \\\"PreferDualStack\\\"\\n(two IP families on dual-stack configured clusters or a single IP family\\non single-stack clusters), or \\\"RequireDualStack\\\" (two IP families on\\ndual-stack configured clusters, otherwise fail). The ipFamilies and\\nclusterIPs fields depend on the value of this field. This field will be\\nwiped when updating a service to type ExternalName.\\n+optional\"\n" + + " \"title\": \"IPFamilyPolicy represents the dual-stack-ness requested or required by\\nthis Service. If there is no value provided, then this field will be set\\nto SingleStack. Services can be \\\"SingleStack\\\" (a single IP family),\\n\\\"PreferDualStack\\\" (two IP families on dual-stack configured clusters or\\na single IP family on single-stack clusters), or \\\"RequireDualStack\\\"\\n(two IP families on dual-stack configured clusters, otherwise fail). The\\nipFamilies and clusterIPs fields depend on the value of this field. This\\nfield will be wiped when updating a service to type ExternalName.\\n+optional\"\n" + " },\n" + " \"loadBalancerClass\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"loadBalancerClass is the class of the load balancer implementation this Service belongs to.\\nIf specified, the value of this field must be a label-style identifier, with an optional prefix,\\ne.g. \\\"internal-vip\\\" or \\\"example.com/internal-vip\\\". Unprefixed names are reserved for end-users.\\nThis field can only be set when the Service type is 'LoadBalancer'. If not set, the default load\\nbalancer implementation is used, today this is typically done through the cloud provider integration,\\nbut should apply for any default implementation. If set, it is assumed that a load balancer\\nimplementation is watching for Services with a matching class. Any default load balancer\\nimplementation (e.g. cloud providers) should ignore Services that set this field.\\nThis field can only be set when creating or updating a Service to type 'LoadBalancer'.\\nOnce set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\\n+featureGate=LoadBalancerClass\\n+optional\"\n" + + " \"title\": \"loadBalancerClass is the class of the load balancer implementation this Service belongs to.\\nIf specified, the value of this field must be a label-style identifier, with an optional prefix,\\ne.g. \\\"internal-vip\\\" or \\\"example.com/internal-vip\\\". 
Unprefixed names are reserved for end-users.\\nThis field can only be set when the Service type is 'LoadBalancer'. If not set, the default load\\nbalancer implementation is used, today this is typically done through the cloud provider integration,\\nbut should apply for any default implementation. If set, it is assumed that a load balancer\\nimplementation is watching for Services with a matching class. Any default load balancer\\nimplementation (e.g. cloud providers) should ignore Services that set this field.\\nThis field can only be set when creating or updating a Service to type 'LoadBalancer'.\\nOnce set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\\n+optional\"\n" + " },\n" + " \"loadBalancerIP\": {\n" + " \"type\": \"string\",\n" + - " \"title\": \"Only applies to Service Type: LoadBalancer\\nLoadBalancer will get created with the IP specified in this field.\\nThis feature depends on whether the underlying cloud-provider supports specifying\\nthe loadBalancerIP when a load balancer is created.\\nThis field will be ignored if the cloud-provider does not support the feature.\\n+optional\"\n" + + " \"title\": \"Only applies to Service Type: LoadBalancer.\\nThis feature depends on whether the underlying cloud-provider supports specifying\\nthe loadBalancerIP when a load balancer is created.\\nThis field will be ignored if the cloud-provider does not support the feature.\\nDeprecated: This field was under-specified and its meaning varies across implementations,\\nand it cannot support dual-stack.\\nAs of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.\\nThis field may be removed in a future API version.\\n+optional\"\n" + " },\n" + " \"loadBalancerSourceRanges\": {\n" + " \"type\": \"array\",\n" + @@ -5296,12 +5408,12 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a StorageOS persistent volume resource.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\n+optional\",\n" + + " \"description\": \"fsType is the filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"readOnly\": {\n" + - " \"description\": \"Defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + + " \"description\": \"readOnly defaults to false (read/write). ReadOnly here will force\\nthe ReadOnly setting in VolumeMounts.\\n+optional\",\n" + " \"type\": \"boolean\",\n" + " \"x-go-name\": \"ReadOnly\"\n" + " },\n" + @@ -5309,12 +5421,12 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1LocalObjectReference\"\n" + " },\n" + " \"volumeName\": {\n" + - " \"description\": \"VolumeName is the human-readable name of the StorageOS volume. Volume\\nnames are only unique within a namespace.\",\n" + + " \"description\": \"volumeName is the human-readable name of the StorageOS volume. 
Volume\\nnames are only unique within a namespace.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeName\"\n" + " },\n" + " \"volumeNamespace\": {\n" + - " \"description\": \"VolumeNamespace specifies the scope of the volume within StorageOS. If no\\nnamespace is specified then the Pod's namespace will be used. This allows the\\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\\nSet VolumeName to any name to override the default behaviour.\\nSet to \\\"default\\\" if you are not using namespaces within StorageOS.\\nNamespaces that do not pre-exist within StorageOS will be created.\\n+optional\",\n" + + " \"description\": \"volumeNamespace specifies the scope of the volume within StorageOS. If no\\nnamespace is specified then the Pod's namespace will be used. This allows the\\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\\nSet VolumeName to any name to override the default behaviour.\\nSet to \\\"default\\\" if you are not using namespaces within StorageOS.\\nNamespaces that do not pre-exist within StorageOS will be created.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumeNamespace\"\n" + " }\n" + @@ -5354,10 +5466,12 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1TaintEffect\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1TerminationMessagePolicy\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"TerminationMessagePolicy describes how termination messages are retrieved from a container.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -5399,6 +5513,7 @@ func SwaggerJsonTemplate() string { " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1TolerationOperator\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"title\": \"A toleration operator is the set of operators that can be used in a toleration.\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + @@ -5410,14 +5525,34 @@ func SwaggerJsonTemplate() string { " \"labelSelector\": {\n" + " \"$ref\": \"#/definitions/v1LabelSelector\"\n" + " },\n" + + " \"matchLabelKeys\": {\n" + + " \"description\": \"MatchLabelKeys is a set of pod label keys to select the pods over which\\nspreading will be calculated. The keys are used to lookup values from the\\nincoming pod labels, those key-value labels are ANDed with labelSelector\\nto select the group of existing pods over which spreading will be calculated\\nfor the incoming pod. Keys that don't exist in the incoming pod labels will\\nbe ignored. 
A null or empty list means only match against labelSelector.\\n+listType=atomic\\n+optional\",\n" + + " \"type\": \"array\",\n" + + " \"items\": {\n" + + " \"type\": \"string\"\n" + + " },\n" + + " \"x-go-name\": \"MatchLabelKeys\"\n" + + " },\n" + " \"maxSkew\": {\n" + - " \"description\": \"MaxSkew describes the degree to which pods may be unevenly distributed.\\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\\nbetween the number of matching pods in the target topology and the global minimum.\\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\\nlabelSelector spread as 1/1/0:\\n+-------+-------+-------+\\n zone1 | zone2 | zone3 |\\n+-------+-------+-------+\\n P | P | |\\n+-------+-------+-------+\\nif MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;\\nscheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)\\nviolate MaxSkew(1).\\nif MaxSkew is 2, incoming pod can be scheduled onto any zone.\\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\\nto topologies that satisfy it.\\nIt's a required field. Default value is 1 and 0 is not allowed.\",\n" + + " \"description\": \"MaxSkew describes the degree to which pods may be unevenly distributed.\\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\\nbetween the number of matching pods in the target topology and the global minimum.\\nThe global minimum is the minimum number of matching pods in an eligible domain\\nor zero if the number of eligible domains is less than MinDomains.\\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\\nlabelSelector spread as 2/2/1:\\nIn this case, the global minimum is 1.\\n+-------+-------+-------+\\n zone1 | zone2 | zone3 |\\n+-------+-------+-------+\\n P P | P P | P |\\n+-------+-------+-------+\\nif MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\\nviolate MaxSkew(1).\\nif MaxSkew is 2, incoming pod can be scheduled onto any zone.\\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\\nto topologies that satisfy it.\\nIt's a required field. 
Default value is 1 and 0 is not allowed.\",\n" + " \"type\": \"integer\",\n" + " \"format\": \"int32\",\n" + " \"x-go-name\": \"MaxSkew\"\n" + " },\n" + + " \"minDomains\": {\n" + + " \"description\": \"MinDomains indicates a minimum number of eligible domains.\\nWhen the number of eligible domains with matching topology keys is less than minDomains,\\nPod Topology Spread treats \\\"global minimum\\\" as 0, and then the calculation of Skew is performed.\\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\\nthis value has no effect on scheduling.\\nAs a result, when the number of eligible domains is less than minDomains,\\nscheduler won't schedule more than maxSkew Pods to those domains.\\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\\nValid values are integers greater than 0.\\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\\n\\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\\nlabelSelector spread as 2/2/2:\\n+-------+-------+-------+\\n zone1 | zone2 | zone3 |\\n+-------+-------+-------+\\n P P | P P | P P |\\n+-------+-------+-------+\\nThe number of domains is less than 5(MinDomains), so \\\"global minimum\\\" is treated as 0.\\nIn this situation, new pod with the same labelSelector cannot be scheduled,\\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\\nit will violate MaxSkew.\\n\\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).\\n+optional\",\n" + + " \"type\": \"integer\",\n" + + " \"format\": \"int32\",\n" + + " \"x-go-name\": \"MinDomains\"\n" + + " },\n" + + " \"nodeAffinityPolicy\": {\n" + + " \"$ref\": \"#/definitions/v1NodeInclusionPolicy\"\n" + + " },\n" + + " \"nodeTaintsPolicy\": {\n" + + " \"$ref\": \"#/definitions/v1NodeInclusionPolicy\"\n" + + " },\n" + " \"topologyKey\": {\n" + - " \"description\": \"TopologyKey is the key of node labels. Nodes that have a label with this key\\nand identical values are considered to be in the same topology.\\nWe consider each \\u003ckey, value\\u003e as a \\\"bucket\\\", and try to put balanced number\\nof pods into each bucket.\\nIt's a required field.\",\n" + + " \"description\": \"TopologyKey is the key of node labels. Nodes that have a label with this key\\nand identical values are considered to be in the same topology.\\nWe consider each \\u003ckey, value\\u003e as a \\\"bucket\\\", and try to put balanced number\\nof pods into each bucket.\\nWe define a domain as a particular instance of a topology.\\nAlso, we define an eligible domain as a domain whose nodes meet the requirements of\\nnodeAffinityPolicy and nodeTaintsPolicy.\\ne.g. 
If TopologyKey is \\\"kubernetes.io/hostname\\\", each Node is a domain of that topology.\\nAnd, if TopologyKey is \\\"topology.kubernetes.io/zone\\\", each zone is a domain of that topology.\\nIt's a required field.\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"TopologyKey\"\n" + " },\n" + @@ -5449,12 +5584,39 @@ func SwaggerJsonTemplate() string { " },\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + + " \"v1TypedObjectReference\": {\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"apiGroup\": {\n" + + " \"description\": \"APIGroup is the group for the resource being referenced.\\nIf APIGroup is not specified, the specified Kind must be in the core API group.\\nFor any other third-party types, APIGroup is required.\\n+optional\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"APIGroup\"\n" + + " },\n" + + " \"kind\": {\n" + + " \"description\": \"Kind is the type of resource being referenced\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"Kind\"\n" + + " },\n" + + " \"name\": {\n" + + " \"description\": \"Name is the name of resource being referenced\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"Name\"\n" + + " },\n" + + " \"namespace\": {\n" + + " \"description\": \"Namespace is the namespace of resource being referenced\\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\\n+featureGate=CrossNamespaceVolumeDataSource\\n+optional\",\n" + + " \"type\": \"string\",\n" + + " \"x-go-name\": \"Namespace\"\n" + + " }\n" + + " },\n" + + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + + " },\n" + " \"v1URIScheme\": {\n" + - " \"description\": \"URIScheme identifies the scheme used for connection to a host for Get actions\",\n" + + " \"description\": \"URIScheme identifies the scheme used for connection to a host for Get actions\\n+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + " \"v1UnsatisfiableConstraintAction\": {\n" + + " \"description\": \"+enum\",\n" + " \"type\": \"string\",\n" + " \"x-go-package\": \"k8s.io/api/core/v1\"\n" + " },\n" + @@ -5517,7 +5679,7 @@ func SwaggerJsonTemplate() string { " \"$ref\": \"#/definitions/v1ISCSIVolumeSource\"\n" + " },\n" + " \"name\": {\n" + - " \"description\": \"Volume's name.\\nMust be a DNS_LABEL and unique within the pod.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\",\n" + + " \"description\": \"name of the volume.\\nMust be a DNS_LABEL and unique within the pod.\\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"Name\"\n" + " },\n" + @@ -5633,22 +5795,22 @@ func SwaggerJsonTemplate() string { " \"title\": \"Represents a vSphere volume resource.\",\n" + " \"properties\": {\n" + " \"fsType\": {\n" + - " \"description\": \"Filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". Implicitly inferred to be \\\"ext4\\\" if unspecified.\\n+optional\",\n" + + " \"description\": \"fsType is filesystem type to mount.\\nMust be a filesystem type supported by the host operating system.\\nEx. \\\"ext4\\\", \\\"xfs\\\", \\\"ntfs\\\". 
Implicitly inferred to be \\\"ext4\\\" if unspecified.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"FSType\"\n" + " },\n" + " \"storagePolicyID\": {\n" + - " \"description\": \"Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\\n+optional\",\n" + + " \"description\": \"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"StoragePolicyID\"\n" + " },\n" + " \"storagePolicyName\": {\n" + - " \"description\": \"Storage Policy Based Management (SPBM) profile name.\\n+optional\",\n" + + " \"description\": \"storagePolicyName is the storage Policy Based Management (SPBM) profile name.\\n+optional\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"StoragePolicyName\"\n" + " },\n" + " \"volumePath\": {\n" + - " \"description\": \"Path that identifies vSphere volume vmdk\",\n" + + " \"description\": \"volumePath is the path that identifies vSphere volume vmdk\",\n" + " \"type\": \"string\",\n" + " \"x-go-name\": \"VolumePath\"\n" + " }\n" + diff --git a/pkg/api/api.swagger.json b/pkg/api/api.swagger.json index 7900e81c0c4..422a9fb425e 100644 --- a/pkg/api/api.swagger.json +++ b/pkg/api/api.swagger.json @@ -613,7 +613,8 @@ "Error", "Evicted", "OOM", - "DeadlineExceeded" + "DeadlineExceeded", + "Rejected" ] }, "apiContainerStatus": { @@ -650,15 +651,9 @@ "cancelling": { "$ref": "#/definitions/apiJobCancellingEvent" }, - "duplicateFound": { - "$ref": "#/definitions/apiJobDuplicateFoundEvent" - }, "failed": { "$ref": "#/definitions/apiJobFailedEvent" }, - "failedCompressed": { - "$ref": "#/definitions/apiJobFailedEventCompressed" - }, "ingressInfo": { "$ref": "#/definitions/apiJobIngressInfoEvent" }, @@ -704,9 +699,6 @@ "unableToSchedule": { "$ref": "#/definitions/apiJobUnableToScheduleEvent" }, - "updated": { - "$ref": "#/definitions/apiJobUpdatedEvent" - }, "utilisation": { "$ref": "#/definitions/apiJobUtilisationEvent" } @@ -1016,27 +1008,6 @@ } } }, - "apiJobDuplicateFoundEvent": { - "type": "object", - "properties": { - "created": { - "type": "string", - "format": "date-time" - }, - "jobId": { - "type": "string" - }, - "jobSetId": { - "type": "string" - }, - "originalJobId": { - "type": "string" - }, - "queue": { - "type": "string" - } - } - }, "apiJobFailedEvent": { "type": "object", "properties": { @@ -1093,16 +1064,6 @@ } } }, - "apiJobFailedEventCompressed": { - "type": "object", - "title": "Only used internally by Armada", - "properties": { - "event": { - "type": "string", - "format": "byte" - } - } - }, "apiJobIngressInfoEvent": { "type": "object", "properties": { @@ -1572,12 +1533,6 @@ "errorIfMissing": { "type": "boolean" }, - "forceLegacy": { - "type": "boolean" - }, - "forceNew": { - "type": "boolean" - }, "fromMessageId": { "type": "string" }, @@ -1689,11 +1644,6 @@ "type": "number", "format": "double" }, - "queueTtlSeconds": { - "description": "Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. 
Zero indicates an infinite lifetime.", - "type": "string", - "format": "int64" - }, "requiredNodeLabels": { "type": "object", "additionalProperties": { @@ -1870,30 +1820,6 @@ } } }, - "apiJobUpdatedEvent": { - "type": "object", - "properties": { - "created": { - "type": "string", - "format": "date-time" - }, - "job": { - "$ref": "#/definitions/apiJob" - }, - "jobId": { - "type": "string" - }, - "jobSetId": { - "type": "string" - }, - "queue": { - "type": "string" - }, - "requestor": { - "type": "string" - } - } - }, "apiJobUtilisationEvent": { "type": "object", "properties": { @@ -2000,6 +1926,7 @@ }, "resourceLimits": { "type": "object", + "title": "These are ignored and should be removed", "additionalProperties": { "type": "number", "format": "double" @@ -2125,7 +2052,7 @@ } }, "resourceQuantity": { - "description": "The serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\na. No precision is lost\nb. No fractional digits will be emitted\nc. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n1.5 will be serialized as \"1500m\"\n1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. 
(So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true", + "description": "The serialization format is:\n\n```\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n```\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n\nNo precision is lost\nNo fractional digits will be emitted\nThe exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n1.5 will be serialized as \"1500m\"\n1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. 
(So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true", "type": "string", "title": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and AsInt64() accessors.", "x-go-package": "k8s.io/apimachinery/pkg/api/resource" @@ -2187,23 +2114,23 @@ "title": "Represents a Persistent Disk resource in AWS.", "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", + "description": "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", "type": "string", "x-go-name": "FSType" }, "partition": { - "description": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional", + "description": "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional", "type": "integer", "format": "int32", "x-go-name": "Partition" }, "readOnly": { - "description": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\".\nIf omitted, the default is \"false\".\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional", + "description": "readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "volumeID": { - "description": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "string", "x-go-name": "VolumeID" } @@ -2227,10 +2154,12 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1AzureDataDiskCachingMode": { + "description": "+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, "v1AzureDataDiskKind": { + "description": "+enum", "type": "string", 
"x-go-package": "k8s.io/api/core/v1" }, @@ -2242,17 +2171,17 @@ "$ref": "#/definitions/v1AzureDataDiskCachingMode" }, "diskName": { - "description": "The Name of the data disk in the blob storage", + "description": "diskName is the Name of the data disk in the blob storage", "type": "string", "x-go-name": "DiskName" }, "diskURI": { - "description": "The URI the data disk in the blob storage", + "description": "diskURI is the URI of data disk in the blob storage", "type": "string", "x-go-name": "DataDiskURI" }, "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional", + "description": "fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional", "type": "string", "x-go-name": "FSType" }, @@ -2260,7 +2189,7 @@ "$ref": "#/definitions/v1AzureDataDiskKind" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" } @@ -2272,17 +2201,17 @@ "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", "properties": { "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "secretName": { - "description": "the name of secret that contains Azure Storage Account Name and Key", + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", "type": "string", "x-go-name": "SecretName" }, "shareName": { - "description": "Share Name", + "description": "shareName is the azure share Name", "type": "string", "x-go-name": "ShareName" } @@ -2294,12 +2223,12 @@ "type": "object", "properties": { "driver": { - "description": "Driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster.", + "description": "driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster.", "type": "string", "x-go-name": "Driver" }, "fsType": { - "description": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional", + "description": "fsType to mount. Ex. 
\"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional", "type": "string", "x-go-name": "FSType" }, @@ -2307,12 +2236,12 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "readOnly": { - "description": "Specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional", + "description": "readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "volumeAttributes": { - "description": "VolumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional", + "description": "volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional", "type": "object", "additionalProperties": { "type": "string" @@ -2355,7 +2284,7 @@ "type": "object", "properties": { "monitors": { - "description": "Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "array", "items": { "type": "string" @@ -2363,17 +2292,17 @@ "x-go-name": "Monitors" }, "path": { - "description": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional", + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional", "type": "string", "x-go-name": "Path" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional", + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "secretFile": { - "description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional", + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional", "type": "string", "x-go-name": "SecretFile" }, @@ -2381,7 +2310,7 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "user": { - "description": "Optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional", + "description": "user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional", "type": "string", "x-go-name": "User" } @@ -2394,12 +2323,12 @@ "title": "Represents a cinder volume resource in Openstack.", "properties": { "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional", + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional", "type": "string", "x-go-name": "FSType" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional", + "description": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, @@ -2407,13 +2336,31 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "volumeID": { - "description": "volume id used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "string", "x-go-name": "VolumeID" } }, "x-go-package": "k8s.io/api/core/v1" }, + "v1ClaimSource": { + "description": "Exactly one of these fields should be set. Consumers of this type must\ntreat an empty object as if it has an unknown value.", + "type": "object", + "title": "ClaimSource describes a reference to a ResourceClaim.", + "properties": { + "resourceClaimName": { + "description": "ResourceClaimName is the name of a ResourceClaim object in the same\nnamespace as this pod.", + "type": "string", + "x-go-name": "ResourceClaimName" + }, + "resourceClaimTemplateName": { + "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate\nobject in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will\nbe bound to this pod. When this pod is deleted, the ResourceClaim\nwill also be deleted. The name of the ResourceClaim will be \u003cpod\nname\u003e-\u003cresource name\u003e, where \u003cresource name\u003e is the\nPodResourceClaim.Name. Pod validation will reject the pod if the\nconcatenated name is not valid for a ResourceClaim (e.g. too long).\n\nAn existing ResourceClaim with that name that is not owned by the\npod will not be used for the pod to avoid using an unrelated\nresource by mistake. 
Scheduling and pod startup are then blocked\nuntil the unrelated ResourceClaim is removed.\n\nThis field is immutable and no changes will be made to the\ncorresponding ResourceClaim by the control plane after creating the\nResourceClaim.", + "type": "string", + "x-go-name": "ResourceClaimTemplateName" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, "v1ClientIPConfig": { "description": "ClientIPConfig represents the configurations of Client IP based session affinity.", "type": "object", @@ -2426,9 +2373,9 @@ } }, "v1Condition": { - "description": "// other fields\n}", + "description": "type FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\t // other fields\n\t}", "type": "object", - "title": "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\ntype FooStatus struct{\n // Represents the observations of a foo's current state.\n // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n // +patchMergeKey=type\n // +patchStrategy=merge\n // +listType=map\n // +listMapKey=type\n Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`", + "title": "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,", "properties": { "lastTransitionTime": { "title": "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Type=string\n+kubebuilder:validation:Format=date-time", @@ -2504,7 +2451,7 @@ "title": "Adapts a ConfigMap into a projected volume.", "properties": { "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional", + "description": "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' 
path or start with '..'.\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1KeyToPath" @@ -2517,7 +2464,7 @@ "x-go-name": "Name" }, "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined\n+optional", + "description": "optional specify whether the ConfigMap or its keys must be defined\n+optional", "type": "boolean", "x-go-name": "Optional" } @@ -2530,13 +2477,13 @@ "title": "Adapts a ConfigMap into a volume.", "properties": { "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", + "description": "defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", "type": "integer", "format": "int32", "x-go-name": "DefaultMode" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional", + "description": "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1KeyToPath" @@ -2549,7 +2496,7 @@ "x-go-name": "Name" }, "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined\n+optional", + "description": "optional specify whether the ConfigMap or its keys must be defined\n+optional", "type": "boolean", "x-go-name": "Optional" } @@ -2561,7 +2508,7 @@ "title": "A single application container that you want to run within a pod.", "properties": { "args": { - "description": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", + "description": "Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", "type": "array", "items": { "type": "string" @@ -2569,7 +2516,7 @@ "x-go-name": "Args" }, "command": { - "description": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", + "description": "Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", "type": "array", "items": { "type": "string" @@ -2593,7 +2540,7 @@ "x-go-name": "EnvFrom" }, "image": { - "description": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional", + "description": "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional", "type": "string", "x-go-name": "Image" }, @@ -2612,7 +2559,7 @@ "x-go-name": "Name" }, "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives\nthe system additional information about the network connections a\ncontainer uses, but is primarily informational. Not specifying a port here\nDOES NOT prevent that port from being exposed. 
Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nCannot be updated.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol", + "description": "List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol", "type": "array", "items": { "$ref": "#/definitions/v1ContainerPort" @@ -2711,6 +2658,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1DNSPolicy": { + "description": "+enum", "type": "string", "title": "DNSPolicy defines how a pod's DNS will be configured.", "x-go-package": "k8s.io/api/core/v1" @@ -2849,11 +2797,12 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1EphemeralContainer": { - "description": "An EphemeralContainer is a container that may be added temporarily to an existing pod for\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\nscheduling guarantees, and they will not be restarted when they exit or when a pod is\nremoved or restarted. If an ephemeral container causes a pod to exceed its resource\nallocation, the pod may be evicted.\nEphemeral containers may not be added by directly updating the pod spec. They must be added\nvia the pod's ephemeralcontainers subresource, and they will appear in the pod spec\nonce added.\nThis is an alpha feature enabled by the EphemeralContainers feature flag.", + "description": "To add an ephemeral container, use the ephemeralcontainers subresource of an existing\nPod. Ephemeral containers may not be removed or restarted.", "type": "object", + "title": "An EphemeralContainer is a temporary container that you may add to an existing Pod for\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\nscheduling guarantees, and they will not be restarted when they exit or when a Pod is\nremoved or restarted. The kubelet may evict a Pod if an ephemeral container causes the\nPod to exceed its resource allocation.", "properties": { "args": { - "description": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", + "description": "Arguments to the entrypoint.\nThe image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", "type": "array", "items": { "type": "string" @@ -2861,7 +2810,7 @@ "x-go-name": "Args" }, "command": { - "description": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", + "description": "Entrypoint array. Not executed within a shell.\nThe image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional", "type": "array", "items": { "type": "string" @@ -2885,7 +2834,7 @@ "x-go-name": "EnvFrom" }, "image": { - "description": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images", + "description": "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images", "type": "string", "x-go-name": "Image" }, @@ -2904,7 +2853,7 @@ "x-go-name": "Name" }, "ports": { - "description": "Ports are not allowed for ephemeral containers.", + "description": "Ports are not allowed for ephemeral containers.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol", "type": "array", "items": { "$ref": "#/definitions/v1ContainerPort" @@ -2934,7 +2883,7 @@ "x-go-name": "StdinOnce" }, "targetContainerName": { - "description": "If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container is run in whatever namespaces are shared\nfor the pod. Note that the container runtime must support this feature.\n+optional", + "description": "If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. 
If the runtime does not\nsupport namespace targeting then the result of setting this field is undefined.\n+optional", "type": "string", "x-go-name": "TargetContainerName" }, @@ -2960,7 +2909,7 @@ "x-go-name": "VolumeDevices" }, "volumeMounts": { - "description": "Pod volumes to mount into the container's filesystem.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge", + "description": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge", "type": "array", "items": { "$ref": "#/definitions/v1VolumeMount" @@ -3006,23 +2955,23 @@ "title": "Represents a Fibre Channel volume.", "properties": { "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", "type": "string", "x-go-name": "FSType" }, "lun": { - "description": "Optional: FC target lun number\n+optional", + "description": "lun is Optional: FC target lun number\n+optional", "type": "integer", "format": "int32", "x-go-name": "Lun" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "targetWWNs": { - "description": "Optional: FC target worldwide names (WWNs)\n+optional", + "description": "targetWWNs is Optional: FC target worldwide names (WWNs)\n+optional", "type": "array", "items": { "type": "string" @@ -3030,7 +2979,7 @@ "x-go-name": "TargetWWNs" }, "wwids": { - "description": "Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional", + "description": "wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional", "type": "array", "items": { "type": "string" @@ -3051,17 +3000,17 @@ "type": "object", "properties": { "driver": { - "description": "Driver is the name of the driver to use for this volume.", + "description": "driver is the name of the driver to use for this volume.", "type": "string", "x-go-name": "Driver" }, "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.\n+optional", + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". 
The default filesystem depends on FlexVolume script.\n+optional", "type": "string", "x-go-name": "FSType" }, "options": { - "description": "Optional: Extra command options if any.\n+optional", + "description": "options is Optional: this field holds extra command options if any.\n+optional", "type": "object", "additionalProperties": { "type": "string" @@ -3069,7 +3018,7 @@ "x-go-name": "Options" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, @@ -3085,12 +3034,12 @@ "title": "Represents a Flocker volume mounted by the Flocker agent.", "properties": { "datasetName": { - "description": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional", + "description": "datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional", "type": "string", "x-go-name": "DatasetName" }, "datasetUUID": { - "description": "UUID of the dataset. This is unique identifier of a Flocker dataset\n+optional", + "description": "datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset\n+optional", "type": "string", "x-go-name": "DatasetUUID" } @@ -3103,46 +3052,63 @@ "title": "Represents a Persistent Disk resource in Google Compute Engine.", "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", + "description": "fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", "type": "string", "x-go-name": "FSType" }, "partition": { - "description": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional", + "description": "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional", "type": "integer", "format": "int32", "x-go-name": "Partition" }, "pdName": { - "description": "Unique name of the PD resource in GCE. 
Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "string", "x-go-name": "PDName" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional", "type": "boolean", "x-go-name": "ReadOnly" } }, "x-go-package": "k8s.io/api/core/v1" }, + "v1GRPCAction": { + "type": "object", + "properties": { + "port": { + "description": "Port number of the gRPC service. Number must be in the range 1 to 65535.", + "type": "integer", + "format": "int32", + "x-go-name": "Port" + }, + "service": { + "description": "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.\n+optional\n+default=\"\"", + "type": "string", + "x-go-name": "Service" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, "v1GitRepoVolumeSource": { "description": "DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.", "type": "object", "title": "Represents a volume that is populated with the contents of a git repository.\nGit repo volumes do not support ownership management.\nGit repo volumes support SELinux relabeling.", "properties": { "directory": { - "description": "Target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional", + "description": "directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. 
Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional", "type": "string", "x-go-name": "Directory" }, "repository": { - "description": "Repository URL", + "description": "repository is the URL", "type": "string", "x-go-name": "Repository" }, "revision": { - "description": "Commit hash for the specified revision.\n+optional", + "description": "revision is the commit hash for the specified revision.\n+optional", "type": "string", "x-go-name": "Revision" } @@ -3155,17 +3121,17 @@ "title": "Represents a Glusterfs mount that lasts the lifetime of a pod.", "properties": { "endpoints": { - "description": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "string", "x-go-name": "EndpointsName" }, "path": { - "description": "Path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "string", "x-go-name": "Path" }, "readOnly": { - "description": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional", + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional", "type": "boolean", "x-go-name": "ReadOnly" } @@ -3208,7 +3174,7 @@ "type": "object", "properties": { "name": { - "description": "The header field name", + "description": "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header.", "type": "string", "x-go-name": "Name" }, @@ -3251,22 +3217,6 @@ } } }, - "v1Handler": { - "description": "Handler defines a specific action that should be taken\nTODO: pass structured data to these actions, and document that data here.", - "type": "object", - "properties": { - "exec": { - "$ref": "#/definitions/v1ExecAction" - }, - "httpGet": { - "$ref": "#/definitions/v1HTTPGetAction" - }, - "tcpSocket": { - "$ref": "#/definitions/v1TCPSocketAction" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, "v1HostAlias": { "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\npod's hosts file.", "type": "object", @@ -3288,6 +3238,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1HostPathType": { + "description": "+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, @@ -3297,7 +3248,7 @@ "title": "Represents a host path mapped into a pod.", "properties": { "path": { - "description": "Path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "description": "path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "type": "string", "x-go-name": "Path" }, @@ -3313,43 +3264,43 @@ "title": "Represents an ISCSI disk.", "properties": { 
"chapAuthDiscovery": { - "description": "whether support iSCSI Discovery CHAP authentication\n+optional", + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication\n+optional", "type": "boolean", "x-go-name": "DiscoveryCHAPAuth" }, "chapAuthSession": { - "description": "whether support iSCSI Session CHAP authentication\n+optional", + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication\n+optional", "type": "boolean", "x-go-name": "SessionCHAPAuth" }, "fsType": { - "description": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", + "description": "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", "type": "string", "x-go-name": "FSType" }, "initiatorName": { - "description": "Custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional", + "description": "initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional", "type": "string", "x-go-name": "InitiatorName" }, "iqn": { - "description": "Target iSCSI Qualified Name.", + "description": "iqn is the target iSCSI Qualified Name.", "type": "string", "x-go-name": "IQN" }, "iscsiInterface": { - "description": "iSCSI Interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional", + "description": "iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional", "type": "string", "x-go-name": "ISCSIInterface" }, "lun": { - "description": "iSCSI Target Lun number.", + "description": "lun represents iSCSI Target Lun number.", "type": "integer", "format": "int32", "x-go-name": "Lun" }, "portals": { - "description": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional", + "description": "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional", "type": "array", "items": { "type": "string" @@ -3357,7 +3308,7 @@ "x-go-name": "Portals" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, @@ -3365,7 +3316,7 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "targetPortal": { - "description": "iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).", + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).", "type": "string", "x-go-name": "TargetPortal" } @@ -3404,6 +3355,59 @@ } } }, + "v1IngressLoadBalancerIngress": { + "description": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "type": "object", + "properties": { + "hostname": { + "type": "string", + "title": "Hostname is set for load-balancer ingress points that are DNS based.\n+optional" + }, + "ip": { + "type": "string", + "title": "IP is set for load-balancer ingress points that are IP based.\n+optional" + }, + "ports": { + "type": "array", + "title": "Ports provides information about the ports exposed by this LoadBalancer.\n+listType=atomic\n+optional", + "items": { + "$ref": "#/definitions/v1IngressPortStatus" + } + } + } + }, + "v1IngressLoadBalancerStatus": { + "description": "IngressLoadBalancerStatus represents the status of a load-balancer.", + "type": "object", + "properties": { + "ingress": { + "type": "array", + "title": "Ingress is a list containing ingress points for the load-balancer.\n+optional", + "items": { + "$ref": "#/definitions/v1IngressLoadBalancerIngress" + } + } + } + }, + "v1IngressPortStatus": { + "type": "object", + "title": "IngressPortStatus represents the error condition of a service port", + "properties": { + "error": { + "type": "string", + "title": "Error is to record the problem with the service port\nThe format of the error shall comply with the following rules:\n- built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.\n---\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+optional\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316" + }, + "port": { + "description": "Port is the port number of the ingress port.", + "type": "integer", + "format": "int32" + }, + "protocol": { + "type": "string", + "title": "Protocol is the protocol of the ingress port.\nThe supported values are: \"TCP\", \"UDP\", \"SCTP\"" + } + } + }, "v1IngressRule": { "description": "IngressRule represents the rules mapping the paths under a specified host to\nthe related backend services. Incoming requests are first evaluated for a host\nmatch, then routed to the backend associated with the matching IngressRuleValue.", "type": "object", @@ -3452,7 +3456,7 @@ }, "ingressClassName": { "type": "string", - "title": "IngressClassName is the name of the IngressClass cluster resource. The\nassociated IngressClass defines which controller will implement the\nresource. This replaces the deprecated `kubernetes.io/ingress.class`\nannotation. For backwards compatibility, when that annotation is set, it\nmust be given precedence over this field. The controller may emit a\nwarning if the field and annotation have different values.\nImplementations of this API should ignore Ingresses without a class\nspecified. An IngressClass resource may be marked as default, which can\nbe used to set a default value for this field. 
For more information,\nrefer to the IngressClass documentation.\n+optional" + "title": "IngressClassName is the name of an IngressClass cluster resource. Ingress\ncontroller implementations use this field to know whether they should be\nserving this Ingress resource, by a transitive connection\n(controller -\u003e IngressClass -\u003e Ingress resource). Although the\n`kubernetes.io/ingress.class` annotation (simple constant name) was never\nformally defined, it was widely supported by Ingress controllers to create\na direct binding between Ingress controller and Ingress resources. Newly\ncreated Ingress resources should prefer using the field. However, even\nthough the annotation is officially deprecated, for backwards compatibility\nreasons, ingress controllers should still honor that annotation if present.\n+optional" }, "rules": { "type": "array", @@ -3476,7 +3480,7 @@ "properties": { "loadBalancer": { "title": "LoadBalancer contains the current status of the load-balancer.\n+optional", - "$ref": "#/definitions/v1LoadBalancerStatus" + "$ref": "#/definitions/v1IngressLoadBalancerStatus" } } }, @@ -3502,18 +3506,18 @@ "title": "Maps a string key to a path within a volume.", "properties": { "key": { - "description": "The key to project.", + "description": "key is the key to project.", "type": "string", "x-go-name": "Key" }, "mode": { - "description": "Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", + "description": "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", "type": "integer", "format": "int32", "x-go-name": "Mode" }, "path": { - "description": "The relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + "description": "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", "type": "string", "x-go-name": "Path" } @@ -3576,10 +3580,26 @@ "type": "object", "properties": { "postStart": { - "$ref": "#/definitions/v1Handler" + "$ref": "#/definitions/v1LifecycleHandler" }, "preStop": { - "$ref": "#/definitions/v1Handler" + "$ref": "#/definitions/v1LifecycleHandler" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, + "v1LifecycleHandler": { + "description": "LifecycleHandler defines a specific action that should be taken in a lifecycle\nhook. 
One and only one of the fields, except TCPSocket must be specified.", + "type": "object", + "properties": { + "exec": { + "$ref": "#/definitions/v1ExecAction" + }, + "httpGet": { + "$ref": "#/definitions/v1HTTPGetAction" + }, + "tcpSocket": { + "$ref": "#/definitions/v1TCPSocketAction" } }, "x-go-package": "k8s.io/api/core/v1" @@ -3672,6 +3692,7 @@ "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1" }, "v1MountPropagationMode": { + "description": "+enum", "type": "string", "title": "MountPropagationMode describes mount propagation.", "x-go-package": "k8s.io/api/core/v1" @@ -3682,17 +3703,17 @@ "title": "Represents an NFS mount that lasts the lifetime of a pod.", "properties": { "path": { - "description": "Path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "string", "x-go-name": "Path" }, "readOnly": { - "description": "ReadOnly here will force\nthe NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional", + "description": "readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "server": { - "description": "Server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "string", "x-go-name": "Server" } @@ -3717,6 +3738,11 @@ }, "x-go-package": "k8s.io/api/core/v1" }, + "v1NodeInclusionPolicy": { + "description": "NodeInclusionPolicy defines the type of node inclusion policy\n+enum", + "type": "string", + "x-go-package": "k8s.io/api/core/v1" + }, "v1NodeSelector": { "description": "A node selector represents the union of the results of one or more label queries\nover a set of nodes; that is, it represents the OR of the selectors represented\nby the node selector terms.\n+structType=atomic", "type": "object", @@ -3733,7 +3759,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1NodeSelectorOperator": { - "description": "A node selector operator is the set of operators that can be used in\na node selector requirement.", + "description": "A node selector operator is the set of operators that can be used in\na node selector requirement.\n+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, @@ -3783,6 +3809,11 @@ }, "x-go-package": "k8s.io/api/core/v1" }, + "v1OSName": { + "type": "string", + "title": "OSName is the set of OS'es that can be used in OS.", + "x-go-package": "k8s.io/api/core/v1" + }, "v1ObjectFieldSelector": { "description": "+structType=atomic", "type": "object", @@ -3813,11 +3844,6 @@ }, "x-go-name": "Annotations" }, - "clusterName": { - "description": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional", - "type": "string", - "x-go-name": "ClusterName" - }, "creationTimestamp": { "$ref": "#/definitions/v1Time" }, @@ -3839,7 +3865,7 @@ "x-go-name": "Finalizers" }, "generateName": { - 
"description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", "type": "string", "x-go-name": "GenerateName" }, @@ -3889,7 +3915,7 @@ "x-go-name": "ResourceVersion" }, "selfLink": { - "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional", + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional", "type": "string", "x-go-name": "SelfLink" }, @@ -3909,7 +3935,7 @@ "x-go-name": "APIVersion" }, "blockOwnerDeletion": { - "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional", + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional", "type": "boolean", "x-go-name": "BlockOwnerDeletion" }, @@ -3935,6 +3961,7 @@ "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1" }, "v1PersistentVolumeAccessMode": { + "description": "+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, @@ -3943,7 +3970,7 @@ "type": "object", "properties": { "accessModes": { - "description": "AccessModes contains the desired access modes the 
volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional", + "description": "accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1PersistentVolumeAccessMode" @@ -3954,7 +3981,7 @@ "$ref": "#/definitions/v1TypedLocalObjectReference" }, "dataSourceRef": { - "$ref": "#/definitions/v1TypedLocalObjectReference" + "$ref": "#/definitions/v1TypedObjectReference" }, "resources": { "$ref": "#/definitions/v1ResourceRequirements" @@ -3963,7 +3990,7 @@ "$ref": "#/definitions/v1LabelSelector" }, "storageClassName": { - "description": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional", + "description": "storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional", "type": "string", "x-go-name": "StorageClassName" }, @@ -3971,7 +3998,7 @@ "$ref": "#/definitions/v1PersistentVolumeMode" }, "volumeName": { - "description": "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional", + "description": "volumeName is the binding reference to the PersistentVolume backing this claim.\n+optional", "type": "string", "x-go-name": "VolumeName" } @@ -3990,11 +4017,6 @@ }, "x-go-name": "Annotations" }, - "clusterName": { - "description": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional", - "type": "string", - "x-go-name": "ClusterName" - }, "creationTimestamp": { "$ref": "#/definitions/v1Time" }, @@ -4016,7 +4038,7 @@ "x-go-name": "Finalizers" }, "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. 
This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", "type": "string", "x-go-name": "GenerateName" }, @@ -4066,7 +4088,7 @@ "x-go-name": "ResourceVersion" }, "selfLink": { - "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional", + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional", "type": "string", "x-go-name": "SelfLink" }, @@ -4085,12 +4107,12 @@ "title": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.", "properties": { "claimName": { - "description": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "description": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "type": "string", "x-go-name": "ClaimName" }, "readOnly": { - "description": "Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional", + "description": "readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" } @@ -4098,6 +4120,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1PersistentVolumeMode": { + "description": "+enum", "type": "string", "title": "PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.", "x-go-package": "k8s.io/api/core/v1" @@ -4107,12 +4130,12 @@ "title": "Represents a Photon Controller persistent disk resource.", "properties": { "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", "type": "string", "x-go-name": "FSType" }, "pdID": { - "description": "ID that identifies Photon Controller persistent disk", + "description": "pdID is the ID that identifies Photon Controller persistent disk", "type": "string", "x-go-name": "PdID" } @@ -4153,7 +4176,7 @@ "$ref": "#/definitions/v1LabelSelector" }, "namespaces": { - "description": "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"\n+optional", + "description": "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\".\n+optional", "type": "array", "items": { "type": "string" @@ -4245,10 +4268,20 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1PodFSGroupChangePolicy": { - "description": "PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume\nwhen volume is mounted.", + "description": "PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume\nwhen volume is mounted.\n+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, + "v1PodOS": { + "type": "object", + "title": "PodOS defines the OS parameters of a pod.", + "properties": { + "name": { + "$ref": "#/definitions/v1OSName" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, "v1PodReadinessGate": { "description": "PodReadinessGate contains the reference to a pod condition", "type": "object", @@ -4259,13 +4292,41 @@ }, "x-go-package": "k8s.io/api/core/v1" }, + "v1PodResourceClaim": { + "description": "It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.\nContainers that need access to the ResourceClaim reference it with this name.", + "type": "object", + "title": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource.", + "properties": { + "name": { + "description": "Name uniquely identifies this resource claim inside the pod.\nThis must be a DNS_LABEL.", + "type": "string", + "x-go-name": "Name" + }, + "source": { + "$ref": "#/definitions/v1ClaimSource" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, + "v1PodSchedulingGate": { + "type": "object", + "title": "PodSchedulingGate is associated to a Pod to guard its scheduling.", + "properties": { + "name": { + "description": "Name of the scheduling gate.\nEach scheduling gate must have a unique name field.", + "type": "string", + "x-go-name": "Name" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, "v1PodSecurityContext": { "description": "Some fields are also present in container.securityContext. Field values of\ncontainer.securityContext take precedence over field values of PodSecurityContext.", "type": "object", "title": "PodSecurityContext holds pod-level security attributes and common container settings.", "properties": { "fsGroup": { - "description": "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:\n\n1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. 
The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\n+optional", + "description": "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:\n\n1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "integer", "format": "int64", "x-go-name": "FSGroup" @@ -4274,7 +4335,7 @@ "$ref": "#/definitions/v1PodFSGroupChangePolicy" }, "runAsGroup": { - "description": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional", + "description": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "integer", "format": "int64", "x-go-name": "RunAsGroup" @@ -4285,7 +4346,7 @@ "x-go-name": "RunAsNonRoot" }, "runAsUser": { - "description": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional", + "description": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "integer", "format": "int64", "x-go-name": "RunAsUser" @@ -4297,7 +4358,7 @@ "$ref": "#/definitions/v1SeccompProfile" }, "supplementalGroups": { - "description": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. If unspecified, no groups will be added to\nany container.\n+optional", + "description": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "array", "items": { "type": "integer", @@ -4306,7 +4367,7 @@ "x-go-name": "SupplementalGroups" }, "sysctls": { - "description": "Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\n+optional", + "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1Sysctl" @@ -4357,7 +4418,7 @@ "x-go-name": "EnableServiceLinks" }, "ephemeralContainers": { - "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\nThis field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge", + "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge", "type": "array", "items": { "$ref": "#/definitions/v1EphemeralContainer" @@ -4387,13 +4448,18 @@ "type": "boolean", "x-go-name": "HostPID" }, + "hostUsers": { + "description": "Use the host's user namespace.\nOptional: Default to true.\nIf set to true or not present, the pod will be run in the host user namespace, useful\nfor when the pod needs a feature only available to the host user namespace, such as\nloading a kernel module with CAP_SYS_MODULE.\nWhen set to false, a new userns is created for the pod. Setting false is useful for\nmitigating container breakout vulnerabilities even allowing users to run their\ncontainers as root without actually having root privileges on the host.\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\n+k8s:conversion-gen=false\n+optional", + "type": "boolean", + "x-go-name": "HostUsers" + }, "hostname": { "description": "Specifies the hostname of the Pod\nIf not specified, the pod's hostname will be set to a system-defined value.\n+optional", "type": "string", "x-go-name": "Hostname" }, "imagePullSecrets": { - "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use. 
For example,\nin the case of docker, only DockerConfig type secrets are honored.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\n+optional\n+patchMergeKey=name\n+patchStrategy=merge", + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod\n+optional\n+patchMergeKey=name\n+patchStrategy=merge", "type": "array", "items": { "$ref": "#/definitions/v1LocalObjectReference" @@ -4421,6 +4487,9 @@ }, "x-go-name": "NodeSelector" }, + "os": { + "$ref": "#/definitions/v1PodOS" + }, "overhead": { "$ref": "#/definitions/v1ResourceList" }, @@ -4446,11 +4515,19 @@ }, "x-go-name": "ReadinessGates" }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\nThis field is immutable.\n\n+patchMergeKey=name\n+patchStrategy=merge,retainKeys\n+listType=map\n+listMapKey=name\n+featureGate=DynamicResourceAllocation\n+optional", + "type": "array", + "items": { + "$ref": "#/definitions/v1PodResourceClaim" + }, + "x-go-name": "ResourceClaims" + }, "restartPolicy": { "$ref": "#/definitions/v1RestartPolicy" }, "runtimeClassName": { - "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class\nThis is a beta feature as of Kubernetes v1.14.\n+optional", + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class\n+optional", "type": "string", "x-go-name": "RuntimeClassName" }, @@ -4459,6 +4536,14 @@ "type": "string", "x-go-name": "SchedulerName" }, + "schedulingGates": { + "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nMore info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.\n\nThis is an alpha-level feature enabled by PodSchedulingReadiness feature gate.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge\n+listType=map\n+listMapKey=name", + "type": "array", + "items": { + "$ref": "#/definitions/v1PodSchedulingGate" + }, + "x-go-name": "SchedulingGates" + }, "securityContext": { "$ref": "#/definitions/v1PodSecurityContext" }, @@ -4543,17 +4628,17 @@ "title": "PortworxVolumeSource represents a Portworx volume resource.", "properties": { "fsType": { - "description": "FSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string", "x-go-name": "FSType" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "volumeID": { - "description": "VolumeID uniquely identifies a Portworx volume", + "description": "volumeID uniquely identifies a Portworx volume", "type": "string", "x-go-name": "VolumeID" } @@ -4561,6 +4646,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1PreemptionPolicy": { + "description": "+enum", "type": "string", "title": "PreemptionPolicy describes a policy for if/when to preempt a pod.", "x-go-package": "k8s.io/api/core/v1" @@ -4594,6 +4680,9 @@ "format": "int32", "x-go-name": "FailureThreshold" }, + "grpc": { + "$ref": "#/definitions/v1GRPCAction" + }, "httpGet": { "$ref": "#/definitions/v1HTTPGetAction" }, @@ -4634,6 +4723,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1ProcMountType": { + "description": "+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, @@ -4642,13 +4732,13 @@ "type": "object", "properties": { "defaultMode": { - "description": "Mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", + "description": "defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", "type": "integer", "format": "int32", "x-go-name": "DefaultMode" }, "sources": { - "description": "list of volume projections\n+optional", + "description": "sources is the list of volume projections\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1VolumeProjection" @@ -4659,12 +4749,13 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1Protocol": { + "description": "+enum", "type": "string", "title": "Protocol defines network protocols supported for things like container ports.", "x-go-package": "k8s.io/api/core/v1" }, "v1PullPolicy": { - "description": "PullPolicy describes a policy for if/when to pull a container image", + "description": "PullPolicy describes a policy for if/when to pull a container image\n+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, @@ -4674,32 +4765,32 @@ "title": "Represents a Quobyte mount that lasts the lifetime of a pod.", "properties": { "group": { - "description": "Group to map volume access to\nDefault is no group\n+optional", + "description": "group to map volume access to\nDefault is no group\n+optional", "type": "string", "x-go-name": "Group" }, "readOnly": { - "description": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional", + "description": "readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, "registry": { - "description": "Registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are 
separated with commas)\nwhich acts as the central registry for volumes", + "description": "registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes", "type": "string", "x-go-name": "Registry" }, "tenant": { - "description": "Tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional", + "description": "tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional", "type": "string", "x-go-name": "Tenant" }, "user": { - "description": "User to map volume access to\nDefaults to serivceaccount user\n+optional", + "description": "user to map volume access to\nDefaults to serivceaccount user\n+optional", "type": "string", "x-go-name": "User" }, "volume": { - "description": "Volume is a string that references an already created Quobyte volume by name.", + "description": "volume is a string that references an already created Quobyte volume by name.", "type": "string", "x-go-name": "Volume" } @@ -4712,22 +4803,22 @@ "title": "Represents a Rados Block Device mount that lasts the lifetime of a pod.", "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", + "description": "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional", "type": "string", "x-go-name": "FSType" }, "image": { - "description": "The rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string", "x-go-name": "RBDImage" }, "keyring": { - "description": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", + "description": "keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", "type": "string", "x-go-name": "Keyring" }, "monitors": { - "description": "A collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "array", "items": { "type": "string" @@ -4735,12 +4826,12 @@ "x-go-name": "CephMonitors" }, "pool": { - "description": "The rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", + "description": "pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", "type": "string", "x-go-name": "RBDPool" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, @@ -4748,13 +4839,25 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "user": { - "description": "The rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", + "description": "user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional", "type": "string", "x-go-name": "RadosUser" } }, "x-go-package": "k8s.io/api/core/v1" }, + "v1ResourceClaim": { + "type": "object", + "title": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "properties": { + "name": { + "description": "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. 
It makes that resource available\ninside a container.", + "type": "string", + "x-go-name": "Name" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, "v1ResourceFieldSelector": { "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format\n+structType=atomic", "type": "object", @@ -4787,6 +4890,14 @@ "type": "object", "title": "ResourceRequirements describes the compute resource requirements.", "properties": { + "claims": { + "description": "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.\n\n+listType=map\n+listMapKey=name\n+featureGate=DynamicResourceAllocation\n+optional", + "type": "array", + "items": { + "$ref": "#/definitions/v1ResourceClaim" + }, + "x-go-name": "Claims" + }, "limits": { "$ref": "#/definitions/v1ResourceList" }, @@ -4797,7 +4908,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1RestartPolicy": { - "description": "Only one of the following restart policies may be specified.\nIf none of the following policies is specified, the default one\nis RestartPolicyAlways.", + "description": "Only one of the following restart policies may be specified.\nIf none of the following policies is specified, the default one\nis RestartPolicyAlways.\n+enum", "type": "string", "title": "RestartPolicy describes how the container should be restarted.", "x-go-package": "k8s.io/api/core/v1" @@ -4834,22 +4945,22 @@ "type": "object", "properties": { "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional", + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional", "type": "string", "x-go-name": "FSType" }, "gateway": { - "description": "The host address of the ScaleIO API Gateway.", + "description": "gateway is the host address of the ScaleIO API Gateway.", "type": "string", "x-go-name": "Gateway" }, "protectionDomain": { - "description": "The name of the ScaleIO Protection Domain for the configured storage.\n+optional", + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.\n+optional", "type": "string", "x-go-name": "ProtectionDomain" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, @@ -4857,27 +4968,27 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "sslEnabled": { - "description": "Flag to enable/disable SSL communication with Gateway, default false\n+optional", + "description": "sslEnabled Flag enable/disable SSL communication with Gateway, default false\n+optional", "type": "boolean", "x-go-name": "SSLEnabled" }, "storageMode": { - "description": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional", + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional", "type": "string", "x-go-name": "StorageMode" }, "storagePool": { - "description": "The ScaleIO Storage Pool associated with the protection domain.\n+optional", + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.\n+optional", "type": "string", "x-go-name": "StoragePool" }, "system": { - "description": "The name of the storage system as configured in ScaleIO.", + "description": "system is the name of the storage system as configured in ScaleIO.", "type": "string", "x-go-name": "System" }, "volumeName": { - "description": "The name of a volume already created in the ScaleIO system\nthat is associated with this volume source.", + "description": "volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source.", "type": "string", "x-go-name": "VolumeName" } @@ -4901,6 +5012,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1SeccompProfileType": { + "description": "+enum", "type": "string", "title": "SeccompProfileType defines the supported seccomp profile types.", "x-go-package": "k8s.io/api/core/v1" @@ -4952,7 +5064,7 @@ "title": "Adapts a secret into a projected volume.", "properties": { "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional", + "description": "items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' 
path or start with '..'.\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1KeyToPath" @@ -4965,7 +5077,7 @@ "x-go-name": "Name" }, "optional": { - "description": "Specify whether the Secret or its key must be defined\n+optional", + "description": "optional field specify whether the Secret or its key must be defined\n+optional", "type": "boolean", "x-go-name": "Optional" } @@ -4978,13 +5090,13 @@ "title": "Adapts a Secret into a volume.", "properties": { "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", + "description": "defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional", "type": "integer", "format": "int32", "x-go-name": "DefaultMode" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional", + "description": "items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' 
path or start with '..'.\n+optional", "type": "array", "items": { "$ref": "#/definitions/v1KeyToPath" @@ -4992,12 +5104,12 @@ "x-go-name": "Items" }, "optional": { - "description": "Specify whether the Secret or its keys must be defined\n+optional", + "description": "optional field specify whether the Secret or its keys must be defined\n+optional", "type": "boolean", "x-go-name": "Optional" }, "secretName": { - "description": "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional", + "description": "secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional", "type": "string", "x-go-name": "SecretName" } @@ -5010,7 +5122,7 @@ "title": "SecurityContext holds security configuration that will be applied to a container.", "properties": { "allowPrivilegeEscalation": { - "description": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\n+optional", + "description": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "boolean", "x-go-name": "AllowPrivilegeEscalation" }, @@ -5018,7 +5130,7 @@ "$ref": "#/definitions/v1Capabilities" }, "privileged": { - "description": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\n+optional", + "description": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "boolean", "x-go-name": "Privileged" }, @@ -5026,12 +5138,12 @@ "$ref": "#/definitions/v1ProcMountType" }, "readOnlyRootFilesystem": { - "description": "Whether this container has a read-only root filesystem.\nDefault is false.\n+optional", + "description": "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "boolean", "x-go-name": "ReadOnlyRootFilesystem" }, "runAsGroup": { - "description": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional", + "description": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "integer", "format": "int64", "x-go-name": "RunAsGroup" @@ -5042,7 +5154,7 @@ "x-go-name": "RunAsNonRoot" }, "runAsUser": { - "description": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional", + "description": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.\n+optional", "type": "integer", "format": "int64", "x-go-name": "RunAsUser" @@ -5082,18 +5194,18 @@ "type": "object", "properties": { "audience": { - "description": "Audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional", + "description": "audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional", "type": "string", "x-go-name": "Audience" }, "expirationSeconds": { - "description": "ExpirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.\n+optional", + "description": "expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. 
The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.\n+optional", "type": "integer", "format": "int64", "x-go-name": "ExpirationSeconds" }, "path": { - "description": "Path is the path relative to the mount point of the file to project the\ntoken into.", + "description": "path is the path relative to the mount point of the file to project the\ntoken into.", "type": "string", "x-go-name": "Path" } @@ -5121,7 +5233,7 @@ "properties": { "appProtocol": { "type": "string", - "title": "The application protocol for this port.\nThis field follows standard Kubernetes label syntax.\nUn-prefixed names are reserved for IANA standard service names (as per\nRFC-6335 and http://www.iana.org/assignments/service-names).\nNon-standard protocols should use prefixed names such as\nmycompany.com/my-custom-protocol.\n+optional" + "title": "The application protocol for this port.\nThis field follows standard Kubernetes label syntax.\nUn-prefixed names are reserved for IANA standard service names (as per\nRFC-6335 and https://www.iana.org/assignments/service-names).\nNon-standard protocols should use prefixed names such as\nmycompany.com/my-custom-protocol.\n+optional" }, "name": { "type": "string", @@ -5153,14 +5265,14 @@ "properties": { "allocateLoadBalancerNodePorts": { "type": "boolean", - "title": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically\nallocated for services with type LoadBalancer. Default is \"true\". It\nmay be set to \"false\" if the cluster load-balancer does not rely on\nNodePorts. If the caller requests specific NodePorts (by specifying a\nvalue), those requests will be respected, regardless of this field.\nThis field may only be set for services with type LoadBalancer and will\nbe cleared if the type is changed to any other type.\nThis field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.\n+featureGate=ServiceLBNodePortControl\n+optional" + "title": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically\nallocated for services with type LoadBalancer. Default is \"true\". It\nmay be set to \"false\" if the cluster load-balancer does not rely on\nNodePorts. If the caller requests specific NodePorts (by specifying a\nvalue), those requests will be respected, regardless of this field.\nThis field may only be set for services with type LoadBalancer and will\nbe cleared if the type is changed to any other type.\n+optional" }, "clusterIP": { "type": "string", "title": "clusterIP is the IP address of the service and is usually assigned\nrandomly. If an address is specified manually, is in-range (as per\nsystem configuration), and is not in use, it will be allocated to the\nservice; otherwise creation of the service will fail. This field may not\nbe changed through updates unless the type field is also being changed\nto ExternalName (which requires this field to be blank) or the type\nfield is being changed from ExternalName (in which case this field may\noptionally be specified, as describe above). Valid values are \"None\",\nempty string (\"\"), or a valid IP address. Setting this to \"None\" makes a\n\"headless service\" (no virtual IP), which is useful when direct endpoint\nconnections are preferred and proxying is not required. Only applies to\ntypes ClusterIP, NodePort, and LoadBalancer. 
If this field is specified\nwhen creating a Service of type ExternalName, creation will fail. This\nfield will be wiped when updating a Service to type ExternalName.\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies\n+optional" }, "clusterIPs": { - "description": "ClusterIPs is a list of IP addresses assigned to this service, and are\nusually assigned randomly. If an address is specified manually, is\nin-range (as per system configuration), and is not in use, it will be\nallocated to the service; otherwise creation of the service will fail.\nThis field may not be changed through updates unless the type field is\nalso being changed to ExternalName (which requires this field to be\nempty) or the type field is being changed from ExternalName (in which\ncase this field may optionally be specified, as describe above). Valid\nvalues are \"None\", empty string (\"\"), or a valid IP address. Setting\nthis to \"None\" makes a \"headless service\" (no virtual IP), which is\nuseful when direct endpoint connections are preferred and proxying is\nnot required. Only applies to types ClusterIP, NodePort, and\nLoadBalancer. If this field is specified when creating a Service of type\nExternalName, creation will fail. This field will be wiped when updating\na Service to type ExternalName. If this field is not specified, it will\nbe initialized from the clusterIP field. If this field is specified,\nclients must ensure that clusterIPs[0] and clusterIP have the same\nvalue.\n\nUnless the \"IPv6DualStack\" feature gate is enabled, this field is\nlimited to one value, which must be the same as the clusterIP field. If\nthe feature gate is enabled, this field may hold a maximum of two\nentries (dual-stack IPs, in either order). These IPs must correspond to\nthe values of the ipFamilies field. Both clusterIPs and ipFamilies are\ngoverned by the ipFamilyPolicy field.\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies\n+listType=atomic\n+optional", + "description": "ClusterIPs is a list of IP addresses assigned to this service, and are\nusually assigned randomly. If an address is specified manually, is\nin-range (as per system configuration), and is not in use, it will be\nallocated to the service; otherwise creation of the service will fail.\nThis field may not be changed through updates unless the type field is\nalso being changed to ExternalName (which requires this field to be\nempty) or the type field is being changed from ExternalName (in which\ncase this field may optionally be specified, as describe above). Valid\nvalues are \"None\", empty string (\"\"), or a valid IP address. Setting\nthis to \"None\" makes a \"headless service\" (no virtual IP), which is\nuseful when direct endpoint connections are preferred and proxying is\nnot required. Only applies to types ClusterIP, NodePort, and\nLoadBalancer. If this field is specified when creating a Service of type\nExternalName, creation will fail. This field will be wiped when updating\na Service to type ExternalName. If this field is not specified, it will\nbe initialized from the clusterIP field. If this field is specified,\nclients must ensure that clusterIPs[0] and clusterIP have the same\nvalue.\n\nThis field may hold a maximum of two entries (dual-stack IPs, in either order).\nThese IPs must correspond to the values of the ipFamilies field. 
Both\nclusterIPs and ipFamilies are governed by the ipFamilyPolicy field.\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies\n+listType=atomic\n+optional", "type": "array", "items": { "type": "string" @@ -5179,19 +5291,19 @@ }, "externalTrafficPolicy": { "type": "string", - "title": "externalTrafficPolicy denotes if this Service desires to route external\ntraffic to node-local or cluster-wide endpoints. \"Local\" preserves the\nclient source IP and avoids a second hop for LoadBalancer and Nodeport\ntype services, but risks potentially imbalanced traffic spreading.\n\"Cluster\" obscures the client source IP and may cause a second hop to\nanother node, but should have good overall load-spreading.\n+optional" + "title": "externalTrafficPolicy describes how nodes distribute service traffic they\nreceive on one of the Service's \"externally-facing\" addresses (NodePorts,\nExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure\nthe service in a way that assumes that external load balancers will take care\nof balancing the service traffic between nodes, and so each node will deliver\ntraffic only to the node-local endpoints of the service, without masquerading\nthe client source IP. (Traffic mistakenly sent to a node with no endpoints will\nbe dropped.) The default value, \"Cluster\", uses the standard behavior of\nrouting to all endpoints evenly (possibly modified by topology and other\nfeatures). Note that traffic sent to an External IP or LoadBalancer IP from\nwithin the cluster will always get \"Cluster\" semantics, but clients sending to\na NodePort from within the cluster may need to take traffic policy into account\nwhen picking a node.\n+optional" }, "healthCheckNodePort": { "type": "integer", "format": "int32", - "title": "healthCheckNodePort specifies the healthcheck nodePort for the service.\nThis only applies when type is set to LoadBalancer and\nexternalTrafficPolicy is set to Local. If a value is specified, is\nin-range, and is not in use, it will be used. If not specified, a value\nwill be automatically allocated. External systems (e.g. load-balancers)\ncan use this port to determine if a given node holds endpoints for this\nservice or not. If this field is specified when creating a Service\nwhich does not need it, creation will fail. This field will be wiped\nwhen updating a Service to no longer need it (e.g. changing type).\n+optional" + "title": "healthCheckNodePort specifies the healthcheck nodePort for the service.\nThis only applies when type is set to LoadBalancer and\nexternalTrafficPolicy is set to Local. If a value is specified, is\nin-range, and is not in use, it will be used. If not specified, a value\nwill be automatically allocated. External systems (e.g. load-balancers)\ncan use this port to determine if a given node holds endpoints for this\nservice or not. If this field is specified when creating a Service\nwhich does not need it, creation will fail. This field will be wiped\nwhen updating a Service to no longer need it (e.g. 
changing type).\nThis field cannot be updated once set.\n+optional" }, "internalTrafficPolicy": { "type": "string", - "title": "InternalTrafficPolicy specifies if the cluster internal traffic\nshould be routed to all endpoints or node-local endpoints only.\n\"Cluster\" routes internal traffic to a Service to all endpoints.\n\"Local\" routes traffic to node-local endpoints only, traffic is\ndropped if no node-local endpoints are ready.\nThe default value is \"Cluster\".\n+featureGate=ServiceInternalTrafficPolicy\n+optional" + "title": "InternalTrafficPolicy describes how nodes distribute service traffic they\nreceive on the ClusterIP. If set to \"Local\", the proxy will assume that pods\nonly want to talk to endpoints of the service on the same node as the pod,\ndropping the traffic if there are no local endpoints. The default value,\n\"Cluster\", uses the standard behavior of routing to all endpoints evenly\n(possibly modified by topology and other features).\n+optional" }, "ipFamilies": { - "description": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this\nservice, and is gated by the \"IPv6DualStack\" feature gate. This field\nis usually assigned automatically based on cluster configuration and the\nipFamilyPolicy field. If this field is specified manually, the requested\nfamily is available in the cluster, and ipFamilyPolicy allows it, it\nwill be used; otherwise creation of the service will fail. This field\nis conditionally mutable: it allows for adding or removing a secondary\nIP family, but it does not allow changing the primary IP family of the\nService. Valid values are \"IPv4\" and \"IPv6\". This field only applies\nto Services of types ClusterIP, NodePort, and LoadBalancer, and does\napply to \"headless\" services. This field will be wiped when updating a\nService to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in\neither order). These families must correspond to the values of the\nclusterIPs field, if specified. Both clusterIPs and ipFamilies are\ngoverned by the ipFamilyPolicy field.\n+listType=atomic\n+optional", + "description": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this\nservice. This field is usually assigned automatically based on cluster\nconfiguration and the ipFamilyPolicy field. If this field is specified\nmanually, the requested family is available in the cluster,\nand ipFamilyPolicy allows it, it will be used; otherwise creation of\nthe service will fail. This field is conditionally mutable: it allows\nfor adding or removing a secondary IP family, but it does not allow\nchanging the primary IP family of the Service. Valid values are \"IPv4\"\nand \"IPv6\". This field only applies to Services of types ClusterIP,\nNodePort, and LoadBalancer, and does apply to \"headless\" services.\nThis field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in\neither order). These families must correspond to the values of the\nclusterIPs field, if specified. Both clusterIPs and ipFamilies are\ngoverned by the ipFamilyPolicy field.\n+listType=atomic\n+optional", "type": "array", "items": { "type": "string" @@ -5199,15 +5311,15 @@ }, "ipFamilyPolicy": { "type": "string", - "title": "IPFamilyPolicy represents the dual-stack-ness requested or required by\nthis Service, and is gated by the \"IPv6DualStack\" feature gate. 
If\nthere is no value provided, then this field will be set to SingleStack.\nServices can be \"SingleStack\" (a single IP family), \"PreferDualStack\"\n(two IP families on dual-stack configured clusters or a single IP family\non single-stack clusters), or \"RequireDualStack\" (two IP families on\ndual-stack configured clusters, otherwise fail). The ipFamilies and\nclusterIPs fields depend on the value of this field. This field will be\nwiped when updating a service to type ExternalName.\n+optional" + "title": "IPFamilyPolicy represents the dual-stack-ness requested or required by\nthis Service. If there is no value provided, then this field will be set\nto SingleStack. Services can be \"SingleStack\" (a single IP family),\n\"PreferDualStack\" (two IP families on dual-stack configured clusters or\na single IP family on single-stack clusters), or \"RequireDualStack\"\n(two IP families on dual-stack configured clusters, otherwise fail). The\nipFamilies and clusterIPs fields depend on the value of this field. This\nfield will be wiped when updating a service to type ExternalName.\n+optional" }, "loadBalancerClass": { "type": "string", - "title": "loadBalancerClass is the class of the load balancer implementation this Service belongs to.\nIf specified, the value of this field must be a label-style identifier, with an optional prefix,\ne.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users.\nThis field can only be set when the Service type is 'LoadBalancer'. If not set, the default load\nbalancer implementation is used, today this is typically done through the cloud provider integration,\nbut should apply for any default implementation. If set, it is assumed that a load balancer\nimplementation is watching for Services with a matching class. Any default load balancer\nimplementation (e.g. cloud providers) should ignore Services that set this field.\nThis field can only be set when creating or updating a Service to type 'LoadBalancer'.\nOnce set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.\n+featureGate=LoadBalancerClass\n+optional" + "title": "loadBalancerClass is the class of the load balancer implementation this Service belongs to.\nIf specified, the value of this field must be a label-style identifier, with an optional prefix,\ne.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users.\nThis field can only be set when the Service type is 'LoadBalancer'. If not set, the default load\nbalancer implementation is used, today this is typically done through the cloud provider integration,\nbut should apply for any default implementation. If set, it is assumed that a load balancer\nimplementation is watching for Services with a matching class. Any default load balancer\nimplementation (e.g. cloud providers) should ignore Services that set this field.\nThis field can only be set when creating or updating a Service to type 'LoadBalancer'.\nOnce set, it can not be changed. 
This field will be wiped when a service is updated to a non 'LoadBalancer' type.\n+optional" }, "loadBalancerIP": { "type": "string", - "title": "Only applies to Service Type: LoadBalancer\nLoadBalancer will get created with the IP specified in this field.\nThis feature depends on whether the underlying cloud-provider supports specifying\nthe loadBalancerIP when a load balancer is created.\nThis field will be ignored if the cloud-provider does not support the feature.\n+optional" + "title": "Only applies to Service Type: LoadBalancer.\nThis feature depends on whether the underlying cloud-provider supports specifying\nthe loadBalancerIP when a load balancer is created.\nThis field will be ignored if the cloud-provider does not support the feature.\nDeprecated: This field was under-specified and its meaning varies across implementations,\nand it cannot support dual-stack.\nAs of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.\nThis field may be removed in a future API version.\n+optional" }, "loadBalancerSourceRanges": { "type": "array", @@ -5285,12 +5397,12 @@ "title": "Represents a StorageOS persistent volume resource.", "properties": { "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional", + "description": "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional", "type": "string", "x-go-name": "FSType" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", + "description": "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional", "type": "boolean", "x-go-name": "ReadOnly" }, @@ -5298,12 +5410,12 @@ "$ref": "#/definitions/v1LocalObjectReference" }, "volumeName": { - "description": "VolumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace.", + "description": "volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace.", "type": "string", "x-go-name": "VolumeName" }, "volumeNamespace": { - "description": "VolumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional", + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. 
This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional", "type": "string", "x-go-name": "VolumeNamespace" } @@ -5343,10 +5455,12 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1TaintEffect": { + "description": "+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, "v1TerminationMessagePolicy": { + "description": "+enum", "type": "string", "title": "TerminationMessagePolicy describes how termination messages are retrieved from a container.", "x-go-package": "k8s.io/api/core/v1" @@ -5388,6 +5502,7 @@ "x-go-package": "k8s.io/api/core/v1" }, "v1TolerationOperator": { + "description": "+enum", "type": "string", "title": "A toleration operator is the set of operators that can be used in a toleration.", "x-go-package": "k8s.io/api/core/v1" @@ -5399,14 +5514,34 @@ "labelSelector": { "$ref": "#/definitions/v1LabelSelector" }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. Keys that don't exist in the incoming pod labels will\nbe ignored. A null or empty list means only match against labelSelector.\n+listType=atomic\n+optional", + "type": "array", + "items": { + "type": "string" + }, + "x-go-name": "MatchLabelKeys" + }, "maxSkew": { - "description": "MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 1/1/0:\n+-------+-------+-------+\n zone1 | zone2 | zone3 |\n+-------+-------+-------+\n P | P | |\n+-------+-------+-------+\nif MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;\nscheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)\nviolate MaxSkew(1).\nif MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. 
Default value is 1 and 0 is not allowed.", + "description": "MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nThe global minimum is the minimum number of matching pods in an eligible domain\nor zero if the number of eligible domains is less than MinDomains.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 2/2/1:\nIn this case, the global minimum is 1.\n+-------+-------+-------+\n zone1 | zone2 | zone3 |\n+-------+-------+-------+\n P P | P P | P |\n+-------+-------+-------+\nif MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\nviolate MaxSkew(1).\nif MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. Default value is 1 and 0 is not allowed.", "type": "integer", "format": "int32", "x-go-name": "MaxSkew" }, + "minDomains": { + "description": "MinDomains indicates a minimum number of eligible domains.\nWhen the number of eligible domains with matching topology keys is less than minDomains,\nPod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed.\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\nthis value has no effect on scheduling.\nAs a result, when the number of eligible domains is less than minDomains,\nscheduler won't schedule more than maxSkew Pods to those domains.\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\nValid values are integers greater than 0.\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\nlabelSelector spread as 2/2/2:\n+-------+-------+-------+\n zone1 | zone2 | zone3 |\n+-------+-------+-------+\n P P | P P | P P |\n+-------+-------+-------+\nThe number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0.\nIn this situation, new pod with the same labelSelector cannot be scheduled,\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\nit will violate MaxSkew.\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).\n+optional", + "type": "integer", + "format": "int32", + "x-go-name": "MinDomains" + }, + "nodeAffinityPolicy": { + "$ref": "#/definitions/v1NodeInclusionPolicy" + }, + "nodeTaintsPolicy": { + "$ref": "#/definitions/v1NodeInclusionPolicy" + }, "topologyKey": { - "description": "TopologyKey is the key of node labels. Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nIt's a required field.", + "description": "TopologyKey is the key of node labels. 
Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nWe define a domain as a particular instance of a topology.\nAlso, we define an eligible domain as a domain whose nodes meet the requirements of\nnodeAffinityPolicy and nodeTaintsPolicy.\ne.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology.\nAnd, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology.\nIt's a required field.", "type": "string", "x-go-name": "TopologyKey" }, @@ -5438,12 +5573,39 @@ }, "x-go-package": "k8s.io/api/core/v1" }, + "v1TypedObjectReference": { + "type": "object", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.\n+optional", + "type": "string", + "x-go-name": "APIGroup" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string", + "x-go-name": "Kind" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string", + "x-go-name": "Name" + }, + "namespace": { + "description": "Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\n+featureGate=CrossNamespaceVolumeDataSource\n+optional", + "type": "string", + "x-go-name": "Namespace" + } + }, + "x-go-package": "k8s.io/api/core/v1" + }, "v1URIScheme": { - "description": "URIScheme identifies the scheme used for connection to a host for Get actions", + "description": "URIScheme identifies the scheme used for connection to a host for Get actions\n+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, "v1UnsatisfiableConstraintAction": { + "description": "+enum", "type": "string", "x-go-package": "k8s.io/api/core/v1" }, @@ -5506,7 +5668,7 @@ "$ref": "#/definitions/v1ISCSIVolumeSource" }, "name": { - "description": "Volume's name.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string", "x-go-name": "Name" }, @@ -5622,22 +5784,22 @@ "title": "Represents a vSphere volume resource.", "properties": { "fsType": { - "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional", + "description": "fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\n+optional", "type": "string", "x-go-name": "FSType" }, "storagePolicyID": { - "description": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional", + "description": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional", "type": "string", "x-go-name": "StoragePolicyID" }, "storagePolicyName": { - "description": "Storage Policy Based Management (SPBM) profile name.\n+optional", + "description": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.\n+optional", "type": "string", "x-go-name": "StoragePolicyName" }, "volumePath": { - "description": "Path that identifies vSphere volume vmdk", + "description": "volumePath is the path that identifies vSphere volume vmdk", "type": "string", "x-go-name": "VolumePath" } diff --git a/pkg/api/event.pb.go b/pkg/api/event.pb.go index a0e531e6c42..e92fb2334e8 100644 --- a/pkg/api/event.pb.go +++ b/pkg/api/event.pb.go @@ -45,6 +45,7 @@ const ( Cause_Evicted Cause = 1 Cause_OOM Cause = 2 Cause_DeadlineExceeded Cause = 3 + Cause_Rejected Cause = 4 ) var Cause_name = map[int32]string{ @@ -52,6 +53,7 @@ var Cause_name = map[int32]string{ 1: "Evicted", 2: "OOM", 3: "DeadlineExceeded", + 4: "Rejected", } var Cause_value = map[string]int32{ @@ -59,6 +61,7 @@ var Cause_value = map[string]int32{ "Evicted": 1, "OOM": 2, "DeadlineExceeded": 3, + "Rejected": 4, } func (x Cause) String() string { @@ -211,81 +214,6 @@ func (m *JobQueuedEvent) GetCreated() time.Time { return time.Time{} } -type JobDuplicateFoundEvent struct { - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` - JobSetId string `protobuf:"bytes,2,opt,name=job_set_id,json=jobSetId,proto3" json:"jobSetId,omitempty"` - Queue string `protobuf:"bytes,3,opt,name=queue,proto3" json:"queue,omitempty"` - Created time.Time `protobuf:"bytes,4,opt,name=created,proto3,stdtime" json:"created"` - OriginalJobId string `protobuf:"bytes,5,opt,name=original_job_id,json=originalJobId,proto3" json:"originalJobId,omitempty"` -} - -func (m *JobDuplicateFoundEvent) Reset() { *m = JobDuplicateFoundEvent{} } -func (*JobDuplicateFoundEvent) ProtoMessage() {} -func (*JobDuplicateFoundEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{2} -} -func (m *JobDuplicateFoundEvent) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobDuplicateFoundEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_JobDuplicateFoundEvent.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *JobDuplicateFoundEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobDuplicateFoundEvent.Merge(m, src) -} -func (m *JobDuplicateFoundEvent) XXX_Size() int { - return m.Size() -} -func (m *JobDuplicateFoundEvent) XXX_DiscardUnknown() { - xxx_messageInfo_JobDuplicateFoundEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_JobDuplicateFoundEvent proto.InternalMessageInfo - -func (m *JobDuplicateFoundEvent) GetJobId() string { - if m != nil { - return m.JobId - } - return "" -} - -func (m *JobDuplicateFoundEvent) GetJobSetId() string { - if m != nil { - return m.JobSetId - } - return "" -} - -func (m *JobDuplicateFoundEvent) GetQueue() string { - if m != nil { - return m.Queue - } - return "" -} - -func (m 
*JobDuplicateFoundEvent) GetCreated() time.Time { - if m != nil { - return m.Created - } - return time.Time{} -} - -func (m *JobDuplicateFoundEvent) GetOriginalJobId() string { - if m != nil { - return m.OriginalJobId - } - return "" -} - type JobLeasedEvent struct { JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` JobSetId string `protobuf:"bytes,2,opt,name=job_set_id,json=jobSetId,proto3" json:"jobSetId,omitempty"` @@ -297,7 +225,7 @@ type JobLeasedEvent struct { func (m *JobLeasedEvent) Reset() { *m = JobLeasedEvent{} } func (*JobLeasedEvent) ProtoMessage() {} func (*JobLeasedEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{3} + return fileDescriptor_7758595c3bb8cf56, []int{2} } func (m *JobLeasedEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -376,7 +304,7 @@ type JobLeaseReturnedEvent struct { func (m *JobLeaseReturnedEvent) Reset() { *m = JobLeaseReturnedEvent{} } func (*JobLeaseReturnedEvent) ProtoMessage() {} func (*JobLeaseReturnedEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{4} + return fileDescriptor_7758595c3bb8cf56, []int{3} } func (m *JobLeaseReturnedEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -478,7 +406,7 @@ type JobLeaseExpiredEvent struct { func (m *JobLeaseExpiredEvent) Reset() { *m = JobLeaseExpiredEvent{} } func (*JobLeaseExpiredEvent) ProtoMessage() {} func (*JobLeaseExpiredEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{5} + return fileDescriptor_7758595c3bb8cf56, []int{4} } func (m *JobLeaseExpiredEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +478,7 @@ type JobPendingEvent struct { func (m *JobPendingEvent) Reset() { *m = JobPendingEvent{} } func (*JobPendingEvent) ProtoMessage() {} func (*JobPendingEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{6} + return fileDescriptor_7758595c3bb8cf56, []int{5} } func (m *JobPendingEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -658,7 +586,7 @@ type JobRunningEvent struct { func (m *JobRunningEvent) Reset() { *m = JobRunningEvent{} } func (*JobRunningEvent) ProtoMessage() {} func (*JobRunningEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{7} + return fileDescriptor_7758595c3bb8cf56, []int{6} } func (m *JobRunningEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -774,7 +702,7 @@ type JobIngressInfoEvent struct { func (m *JobIngressInfoEvent) Reset() { *m = JobIngressInfoEvent{} } func (*JobIngressInfoEvent) ProtoMessage() {} func (*JobIngressInfoEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{8} + return fileDescriptor_7758595c3bb8cf56, []int{7} } func (m *JobIngressInfoEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -897,7 +825,7 @@ type JobUnableToScheduleEvent struct { func (m *JobUnableToScheduleEvent) Reset() { *m = JobUnableToScheduleEvent{} } func (*JobUnableToScheduleEvent) ProtoMessage() {} func (*JobUnableToScheduleEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{9} + return fileDescriptor_7758595c3bb8cf56, []int{8} } func (m *JobUnableToScheduleEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1023,7 +951,7 @@ type JobFailedEvent struct { func (m *JobFailedEvent) Reset() { *m = JobFailedEvent{} } func (*JobFailedEvent) ProtoMessage() {} func (*JobFailedEvent) Descriptor() ([]byte, []int) { - 
return fileDescriptor_7758595c3bb8cf56, []int{10} + return fileDescriptor_7758595c3bb8cf56, []int{9} } func (m *JobFailedEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1090,7 @@ type JobPreemptingEvent struct { func (m *JobPreemptingEvent) Reset() { *m = JobPreemptingEvent{} } func (*JobPreemptingEvent) ProtoMessage() {} func (*JobPreemptingEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{11} + return fileDescriptor_7758595c3bb8cf56, []int{10} } func (m *JobPreemptingEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1240,7 +1168,7 @@ type JobPreemptedEvent struct { func (m *JobPreemptedEvent) Reset() { *m = JobPreemptedEvent{} } func (*JobPreemptedEvent) ProtoMessage() {} func (*JobPreemptedEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{12} + return fileDescriptor_7758595c3bb8cf56, []int{11} } func (m *JobPreemptedEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1325,50 +1253,6 @@ func (m *JobPreemptedEvent) GetPreemptiveRunId() string { return "" } -// Only used internally by Armada -type JobFailedEventCompressed struct { - Event []byte `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` -} - -func (m *JobFailedEventCompressed) Reset() { *m = JobFailedEventCompressed{} } -func (*JobFailedEventCompressed) ProtoMessage() {} -func (*JobFailedEventCompressed) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{13} -} -func (m *JobFailedEventCompressed) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobFailedEventCompressed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_JobFailedEventCompressed.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *JobFailedEventCompressed) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobFailedEventCompressed.Merge(m, src) -} -func (m *JobFailedEventCompressed) XXX_Size() int { - return m.Size() -} -func (m *JobFailedEventCompressed) XXX_DiscardUnknown() { - xxx_messageInfo_JobFailedEventCompressed.DiscardUnknown(m) -} - -var xxx_messageInfo_JobFailedEventCompressed proto.InternalMessageInfo - -func (m *JobFailedEventCompressed) GetEvent() []byte { - if m != nil { - return m.Event - } - return nil -} - type JobSucceededEvent struct { JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` JobSetId string `protobuf:"bytes,2,opt,name=job_set_id,json=jobSetId,proto3" json:"jobSetId,omitempty"` @@ -1385,7 +1269,7 @@ type JobSucceededEvent struct { func (m *JobSucceededEvent) Reset() { *m = JobSucceededEvent{} } func (*JobSucceededEvent) ProtoMessage() {} func (*JobSucceededEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{14} + return fileDescriptor_7758595c3bb8cf56, []int{12} } func (m *JobSucceededEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1502,7 +1386,7 @@ type JobUtilisationEvent struct { func (m *JobUtilisationEvent) Reset() { *m = JobUtilisationEvent{} } func (*JobUtilisationEvent) ProtoMessage() {} func (*JobUtilisationEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{15} + return fileDescriptor_7758595c3bb8cf56, []int{13} } func (m *JobUtilisationEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1627,7 +1511,7 @@ type JobReprioritizingEvent 
struct { func (m *JobReprioritizingEvent) Reset() { *m = JobReprioritizingEvent{} } func (*JobReprioritizingEvent) ProtoMessage() {} func (*JobReprioritizingEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{16} + return fileDescriptor_7758595c3bb8cf56, []int{14} } func (m *JobReprioritizingEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1710,7 +1594,7 @@ type JobReprioritizedEvent struct { func (m *JobReprioritizedEvent) Reset() { *m = JobReprioritizedEvent{} } func (*JobReprioritizedEvent) ProtoMessage() {} func (*JobReprioritizedEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{17} + return fileDescriptor_7758595c3bb8cf56, []int{15} } func (m *JobReprioritizedEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1793,7 +1677,7 @@ type JobCancellingEvent struct { func (m *JobCancellingEvent) Reset() { *m = JobCancellingEvent{} } func (*JobCancellingEvent) ProtoMessage() {} func (*JobCancellingEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{18} + return fileDescriptor_7758595c3bb8cf56, []int{16} } func (m *JobCancellingEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1876,7 +1760,7 @@ type JobCancelledEvent struct { func (m *JobCancelledEvent) Reset() { *m = JobCancelledEvent{} } func (*JobCancelledEvent) ProtoMessage() {} func (*JobCancelledEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{19} + return fileDescriptor_7758595c3bb8cf56, []int{17} } func (m *JobCancelledEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1963,7 +1847,7 @@ type JobTerminatedEvent struct { func (m *JobTerminatedEvent) Reset() { *m = JobTerminatedEvent{} } func (*JobTerminatedEvent) ProtoMessage() {} func (*JobTerminatedEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{20} + return fileDescriptor_7758595c3bb8cf56, []int{18} } func (m *JobTerminatedEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2062,94 +1946,10 @@ func (m *JobTerminatedEvent) GetReason() string { return "" } -type JobUpdatedEvent struct { - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` - JobSetId string `protobuf:"bytes,2,opt,name=job_set_id,json=jobSetId,proto3" json:"jobSetId,omitempty"` - Queue string `protobuf:"bytes,3,opt,name=queue,proto3" json:"queue,omitempty"` - Created time.Time `protobuf:"bytes,4,opt,name=created,proto3,stdtime" json:"created"` - Requestor string `protobuf:"bytes,5,opt,name=requestor,proto3" json:"requestor,omitempty"` - Job Job `protobuf:"bytes,6,opt,name=job,proto3" json:"job"` -} - -func (m *JobUpdatedEvent) Reset() { *m = JobUpdatedEvent{} } -func (*JobUpdatedEvent) ProtoMessage() {} -func (*JobUpdatedEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{21} -} -func (m *JobUpdatedEvent) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobUpdatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_JobUpdatedEvent.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *JobUpdatedEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobUpdatedEvent.Merge(m, src) -} -func (m *JobUpdatedEvent) XXX_Size() int { - return m.Size() -} -func (m *JobUpdatedEvent) XXX_DiscardUnknown() { - 
xxx_messageInfo_JobUpdatedEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_JobUpdatedEvent proto.InternalMessageInfo - -func (m *JobUpdatedEvent) GetJobId() string { - if m != nil { - return m.JobId - } - return "" -} - -func (m *JobUpdatedEvent) GetJobSetId() string { - if m != nil { - return m.JobSetId - } - return "" -} - -func (m *JobUpdatedEvent) GetQueue() string { - if m != nil { - return m.Queue - } - return "" -} - -func (m *JobUpdatedEvent) GetCreated() time.Time { - if m != nil { - return m.Created - } - return time.Time{} -} - -func (m *JobUpdatedEvent) GetRequestor() string { - if m != nil { - return m.Requestor - } - return "" -} - -func (m *JobUpdatedEvent) GetJob() Job { - if m != nil { - return m.Job - } - return Job{} -} - type EventMessage struct { // Types that are valid to be assigned to Events: // *EventMessage_Submitted // *EventMessage_Queued - // *EventMessage_DuplicateFound // *EventMessage_Leased // *EventMessage_LeaseReturned // *EventMessage_LeaseExpired @@ -2165,8 +1965,6 @@ type EventMessage struct { // *EventMessage_Utilisation // *EventMessage_IngressInfo // *EventMessage_Reprioritizing - // *EventMessage_Updated - // *EventMessage_FailedCompressed // *EventMessage_Preempted // *EventMessage_Preempting Events isEventMessage_Events `protobuf_oneof:"events"` @@ -2175,7 +1973,7 @@ type EventMessage struct { func (m *EventMessage) Reset() { *m = EventMessage{} } func (*EventMessage) ProtoMessage() {} func (*EventMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{22} + return fileDescriptor_7758595c3bb8cf56, []int{19} } func (m *EventMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2216,9 +2014,6 @@ type EventMessage_Submitted struct { type EventMessage_Queued struct { Queued *JobQueuedEvent `protobuf:"bytes,2,opt,name=queued,proto3,oneof" json:"queued,omitempty"` } -type EventMessage_DuplicateFound struct { - DuplicateFound *JobDuplicateFoundEvent `protobuf:"bytes,16,opt,name=duplicate_found,json=duplicateFound,proto3,oneof" json:"duplicateFound,omitempty"` -} type EventMessage_Leased struct { Leased *JobLeasedEvent `protobuf:"bytes,3,opt,name=leased,proto3,oneof" json:"leased,omitempty"` } @@ -2264,12 +2059,6 @@ type EventMessage_IngressInfo struct { type EventMessage_Reprioritizing struct { Reprioritizing *JobReprioritizingEvent `protobuf:"bytes,18,opt,name=reprioritizing,proto3,oneof" json:"reprioritizing,omitempty"` } -type EventMessage_Updated struct { - Updated *JobUpdatedEvent `protobuf:"bytes,19,opt,name=updated,proto3,oneof" json:"updated,omitempty"` -} -type EventMessage_FailedCompressed struct { - FailedCompressed *JobFailedEventCompressed `protobuf:"bytes,20,opt,name=failedCompressed,proto3,oneof" json:"failedCompressed,omitempty"` -} type EventMessage_Preempted struct { Preempted *JobPreemptedEvent `protobuf:"bytes,21,opt,name=preempted,proto3,oneof" json:"preempted,omitempty"` } @@ -2279,7 +2068,6 @@ type EventMessage_Preempting struct { func (*EventMessage_Submitted) isEventMessage_Events() {} func (*EventMessage_Queued) isEventMessage_Events() {} -func (*EventMessage_DuplicateFound) isEventMessage_Events() {} func (*EventMessage_Leased) isEventMessage_Events() {} func (*EventMessage_LeaseReturned) isEventMessage_Events() {} func (*EventMessage_LeaseExpired) isEventMessage_Events() {} @@ -2295,8 +2083,6 @@ func (*EventMessage_Terminated) isEventMessage_Events() {} func (*EventMessage_Utilisation) isEventMessage_Events() {} func (*EventMessage_IngressInfo) isEventMessage_Events() {} func 
(*EventMessage_Reprioritizing) isEventMessage_Events() {} -func (*EventMessage_Updated) isEventMessage_Events() {} -func (*EventMessage_FailedCompressed) isEventMessage_Events() {} func (*EventMessage_Preempted) isEventMessage_Events() {} func (*EventMessage_Preempting) isEventMessage_Events() {} @@ -2321,13 +2107,6 @@ func (m *EventMessage) GetQueued() *JobQueuedEvent { return nil } -func (m *EventMessage) GetDuplicateFound() *JobDuplicateFoundEvent { - if x, ok := m.GetEvents().(*EventMessage_DuplicateFound); ok { - return x.DuplicateFound - } - return nil -} - func (m *EventMessage) GetLeased() *JobLeasedEvent { if x, ok := m.GetEvents().(*EventMessage_Leased); ok { return x.Leased @@ -2433,20 +2212,6 @@ func (m *EventMessage) GetReprioritizing() *JobReprioritizingEvent { return nil } -func (m *EventMessage) GetUpdated() *JobUpdatedEvent { - if x, ok := m.GetEvents().(*EventMessage_Updated); ok { - return x.Updated - } - return nil -} - -func (m *EventMessage) GetFailedCompressed() *JobFailedEventCompressed { - if x, ok := m.GetEvents().(*EventMessage_FailedCompressed); ok { - return x.FailedCompressed - } - return nil -} - func (m *EventMessage) GetPreempted() *JobPreemptedEvent { if x, ok := m.GetEvents().(*EventMessage_Preempted); ok { return x.Preempted @@ -2466,7 +2231,6 @@ func (*EventMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ (*EventMessage_Submitted)(nil), (*EventMessage_Queued)(nil), - (*EventMessage_DuplicateFound)(nil), (*EventMessage_Leased)(nil), (*EventMessage_LeaseReturned)(nil), (*EventMessage_LeaseExpired)(nil), @@ -2482,8 +2246,6 @@ func (*EventMessage) XXX_OneofWrappers() []interface{} { (*EventMessage_Utilisation)(nil), (*EventMessage_IngressInfo)(nil), (*EventMessage_Reprioritizing)(nil), - (*EventMessage_Updated)(nil), - (*EventMessage_FailedCompressed)(nil), (*EventMessage_Preempted)(nil), (*EventMessage_Preempting)(nil), } @@ -2500,7 +2262,7 @@ type ContainerStatus struct { func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{23} + return fileDescriptor_7758595c3bb8cf56, []int{20} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2573,7 +2335,7 @@ type EventStreamMessage struct { func (m *EventStreamMessage) Reset() { *m = EventStreamMessage{} } func (*EventStreamMessage) ProtoMessage() {} func (*EventStreamMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{24} + return fileDescriptor_7758595c3bb8cf56, []int{21} } func (m *EventStreamMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2623,14 +2385,12 @@ type JobSetRequest struct { FromMessageId string `protobuf:"bytes,3,opt,name=from_message_id,json=fromMessageId,proto3" json:"fromMessageId,omitempty"` Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"` ErrorIfMissing bool `protobuf:"varint,5,opt,name=errorIfMissing,proto3" json:"errorIfMissing,omitempty"` - ForceLegacy bool `protobuf:"varint,6,opt,name=force_legacy,json=forceLegacy,proto3" json:"forceLegacy,omitempty"` - ForceNew bool `protobuf:"varint,7,opt,name=force_new,json=forceNew,proto3" json:"forceNew,omitempty"` } func (m *JobSetRequest) Reset() { *m = JobSetRequest{} } func (*JobSetRequest) ProtoMessage() {} func (*JobSetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{25} + return fileDescriptor_7758595c3bb8cf56, []int{22} } 
func (m *JobSetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2694,32 +2454,16 @@ func (m *JobSetRequest) GetErrorIfMissing() bool { return false } -func (m *JobSetRequest) GetForceLegacy() bool { - if m != nil { - return m.ForceLegacy - } - return false -} - -func (m *JobSetRequest) GetForceNew() bool { - if m != nil { - return m.ForceNew - } - return false -} - type WatchRequest struct { - Queue string `protobuf:"bytes,1,opt,name=queue,proto3" json:"queue,omitempty"` - JobSetId string `protobuf:"bytes,2,opt,name=job_set_id,json=jobSetId,proto3" json:"jobSetId,omitempty"` - FromId string `protobuf:"bytes,3,opt,name=from_id,json=fromId,proto3" json:"fromId,omitempty"` - ForceLegacy bool `protobuf:"varint,4,opt,name=force_legacy,json=forceLegacy,proto3" json:"forceLegacy,omitempty"` - ForceNew bool `protobuf:"varint,5,opt,name=force_new,json=forceNew,proto3" json:"forceNew,omitempty"` + Queue string `protobuf:"bytes,1,opt,name=queue,proto3" json:"queue,omitempty"` + JobSetId string `protobuf:"bytes,2,opt,name=job_set_id,json=jobSetId,proto3" json:"jobSetId,omitempty"` + FromId string `protobuf:"bytes,3,opt,name=from_id,json=fromId,proto3" json:"fromId,omitempty"` } func (m *WatchRequest) Reset() { *m = WatchRequest{} } func (*WatchRequest) ProtoMessage() {} func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7758595c3bb8cf56, []int{26} + return fileDescriptor_7758595c3bb8cf56, []int{23} } func (m *WatchRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2769,25 +2513,10 @@ func (m *WatchRequest) GetFromId() string { return "" } -func (m *WatchRequest) GetForceLegacy() bool { - if m != nil { - return m.ForceLegacy - } - return false -} - -func (m *WatchRequest) GetForceNew() bool { - if m != nil { - return m.ForceNew - } - return false -} - func init() { proto.RegisterEnum("api.Cause", Cause_name, Cause_value) proto.RegisterType((*JobSubmittedEvent)(nil), "api.JobSubmittedEvent") proto.RegisterType((*JobQueuedEvent)(nil), "api.JobQueuedEvent") - proto.RegisterType((*JobDuplicateFoundEvent)(nil), "api.JobDuplicateFoundEvent") proto.RegisterType((*JobLeasedEvent)(nil), "api.JobLeasedEvent") proto.RegisterType((*JobLeaseReturnedEvent)(nil), "api.JobLeaseReturnedEvent") proto.RegisterType((*JobLeaseExpiredEvent)(nil), "api.JobLeaseExpiredEvent") @@ -2800,7 +2529,6 @@ func init() { proto.RegisterMapType((map[string]int32)(nil), "api.JobFailedEvent.ExitCodesEntry") proto.RegisterType((*JobPreemptingEvent)(nil), "api.JobPreemptingEvent") proto.RegisterType((*JobPreemptedEvent)(nil), "api.JobPreemptedEvent") - proto.RegisterType((*JobFailedEventCompressed)(nil), "api.JobFailedEventCompressed") proto.RegisterType((*JobSucceededEvent)(nil), "api.JobSucceededEvent") proto.RegisterType((*JobUtilisationEvent)(nil), "api.JobUtilisationEvent") proto.RegisterMapType((map[string]resource.Quantity)(nil), "api.JobUtilisationEvent.MaxResourcesForPeriodEntry") @@ -2810,7 +2538,6 @@ func init() { proto.RegisterType((*JobCancellingEvent)(nil), "api.JobCancellingEvent") proto.RegisterType((*JobCancelledEvent)(nil), "api.JobCancelledEvent") proto.RegisterType((*JobTerminatedEvent)(nil), "api.JobTerminatedEvent") - proto.RegisterType((*JobUpdatedEvent)(nil), "api.JobUpdatedEvent") proto.RegisterType((*EventMessage)(nil), "api.EventMessage") proto.RegisterType((*ContainerStatus)(nil), "api.ContainerStatus") proto.RegisterType((*EventStreamMessage)(nil), "api.EventStreamMessage") @@ -2821,167 +2548,154 @@ func init() { func init() { 
proto.RegisterFile("pkg/api/event.proto", fileDescriptor_7758595c3bb8cf56) } var fileDescriptor_7758595c3bb8cf56 = []byte{ - // 2546 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1b, 0xc7, - 0xf5, 0xd7, 0x52, 0xe2, 0xd7, 0x50, 0x9f, 0xa3, 0x0f, 0xaf, 0x69, 0x5b, 0x14, 0x18, 0xe0, 0x1f, - 0xc5, 0x88, 0xc9, 0xfc, 0xe5, 0xa4, 0x08, 0x8c, 0xa0, 0x85, 0x29, 0xcb, 0x89, 0x84, 0x38, 0x76, - 0x28, 0x1b, 0x69, 0x8b, 0x00, 0xcc, 0x72, 0x77, 0x44, 0xad, 0x44, 0xee, 0x6c, 0xf6, 0xc3, 0xb6, - 0x62, 0x04, 0x08, 0x5a, 0xb4, 0xc8, 0xa5, 0x68, 0x8a, 0xf6, 0x9e, 0x00, 0xbd, 0xf5, 0xd4, 0x4b, - 0xaf, 0x3d, 0x14, 0x3d, 0xa4, 0x37, 0x17, 0xbd, 0xe4, 0xc4, 0xb6, 0x76, 0x52, 0x14, 0x3c, 0xf4, - 0xde, 0x5b, 0x31, 0x6f, 0x66, 0xb9, 0x33, 0x2b, 0x1a, 0x92, 0x99, 0x26, 0x35, 0x08, 0x5e, 0x12, - 0xf3, 0xf7, 0x66, 0xde, 0xbc, 0x79, 0xf3, 0x7b, 0x33, 0x6f, 0xf6, 0x8d, 0xd0, 0xa2, 0x7b, 0xd8, - 0xaa, 0x1a, 0xae, 0x5d, 0x25, 0x77, 0x89, 0x13, 0x54, 0x5c, 0x8f, 0x06, 0x14, 0x4f, 0x1a, 0xae, - 0x5d, 0x2c, 0xb5, 0x28, 0x6d, 0xb5, 0x49, 0x15, 0xa0, 0x66, 0xb8, 0x57, 0x0d, 0xec, 0x0e, 0xf1, - 0x03, 0xa3, 0xe3, 0xf2, 0x56, 0xc5, 0xa5, 0xa8, 0xab, 0x1f, 0x36, 0x3b, 0x76, 0x90, 0x44, 0xf7, - 0x89, 0xd1, 0x0e, 0xf6, 0x05, 0x7a, 0x2e, 0xa9, 0x8c, 0x74, 0xdc, 0xe0, 0x48, 0x08, 0x2f, 0xb5, - 0xec, 0x60, 0x3f, 0x6c, 0x56, 0x4c, 0xda, 0xa9, 0xb6, 0x68, 0x8b, 0xc6, 0xad, 0xd8, 0x2f, 0xf8, - 0x01, 0xff, 0x12, 0xcd, 0xcf, 0x0b, 0x5d, 0x6c, 0x10, 0xc3, 0x71, 0x68, 0x60, 0x04, 0x36, 0x75, - 0x7c, 0x21, 0x7d, 0xf9, 0xf0, 0x55, 0xbf, 0x62, 0x53, 0x26, 0xed, 0x18, 0xe6, 0xbe, 0xed, 0x10, - 0xef, 0xa8, 0x1a, 0xd9, 0xe4, 0x11, 0x9f, 0x86, 0x9e, 0x49, 0xaa, 0x2d, 0xe2, 0x10, 0xcf, 0x08, - 0x88, 0xc5, 0x7b, 0x95, 0x7f, 0x95, 0x42, 0x0b, 0x3b, 0xb4, 0xb9, 0x0b, 0x33, 0x09, 0x88, 0xb5, - 0xc5, 0xbc, 0x81, 0x2f, 0xa2, 0xcc, 0x01, 0x6d, 0x36, 0x6c, 0x4b, 0xd7, 0xd6, 0xb4, 0xf5, 0x7c, - 0x6d, 0xb1, 0xd7, 0x2d, 0xcd, 0x1d, 0xd0, 0xe6, 0xb6, 0xf5, 0x22, 0xed, 0xd8, 0x01, 0xcc, 0xa1, - 0x9e, 0x06, 0x00, 0xbf, 0x8c, 0x10, 0x6b, 0xeb, 0x93, 0x80, 0xb5, 0x4f, 0x41, 0xfb, 0x95, 0x5e, - 0xb7, 0x84, 0x0f, 0x68, 0x73, 0x97, 0x04, 0x4a, 0x97, 0x5c, 0x84, 0xe1, 0x17, 0x50, 0xfa, 0xfd, - 0x90, 0x84, 0x44, 0x9f, 0x8c, 0x07, 0x00, 0x40, 0x1e, 0x00, 0x00, 0xbc, 0x8d, 0xb2, 0xa6, 0x47, - 0x98, 0xcd, 0xfa, 0xd4, 0x9a, 0xb6, 0x5e, 0xd8, 0x28, 0x56, 0xb8, 0x23, 0x2a, 0x91, 0xbb, 0x2a, - 0xb7, 0xa3, 0x15, 0xaa, 0x2d, 0x7e, 0xde, 0x2d, 0x4d, 0xf4, 0xba, 0xa5, 0xa8, 0xcb, 0x27, 0x7f, - 0x2d, 0x69, 0xf5, 0xe8, 0x07, 0x7e, 0x1e, 0x4d, 0x1e, 0xd0, 0xa6, 0x9e, 0x06, 0x35, 0xb9, 0x8a, - 0xe1, 0xda, 0x95, 0x1d, 0xda, 0xac, 0x15, 0x44, 0x27, 0x26, 0xac, 0xb3, 0xff, 0x94, 0xff, 0xa9, - 0xa1, 0xd9, 0x1d, 0xda, 0x7c, 0x9b, 0x19, 0x30, 0xda, 0x3e, 0x29, 0xff, 0x2e, 0x85, 0x56, 0x76, - 0x68, 0xf3, 0x5a, 0xe8, 0xb6, 0x6d, 0xd3, 0x08, 0xc8, 0x75, 0x1a, 0x3a, 0x23, 0x4e, 0x83, 0x4d, - 0x34, 0x47, 0x3d, 0xbb, 0x65, 0x3b, 0x46, 0xbb, 0x21, 0x26, 0x98, 0x86, 0xf1, 0xcf, 0xf5, 0xba, - 0xa5, 0x33, 0x91, 0x68, 0x27, 0x31, 0xd1, 0x19, 0x45, 0x50, 0xfe, 0x2c, 0x05, 0x14, 0x79, 0x93, - 0x18, 0xfe, 0xa8, 0x87, 0xcd, 0x77, 0x10, 0x32, 0xdb, 0xa1, 0x1f, 0x10, 0x2f, 0x76, 0xd5, 0x99, - 0x5e, 0xb7, 0xb4, 0x28, 0x50, 0xc5, 0xd8, 0x7c, 0x1f, 0x2c, 0xff, 0x7c, 0x0a, 0x2d, 0x47, 0x2e, - 0xaa, 0x93, 0x20, 0xf4, 0x9c, 0xb1, 0xa7, 0x06, 0x7a, 0x0a, 0xbf, 0x88, 0x32, 0x1e, 0x31, 0x7c, - 0xea, 0xe8, 0x19, 0xe8, 0xb3, 0xd4, 0xeb, 0x96, 0xe6, 0x39, 0x22, 0x75, 0x10, 0x6d, 0xf0, 0xf7, - 0xd0, 0xcc, 0x61, 0xd8, 0x24, 0x9e, 0x43, 0x02, 
0xe2, 0xb3, 0x81, 0xb2, 0xd0, 0xa9, 0xd8, 0xeb, - 0x96, 0x56, 0x62, 0x81, 0x32, 0xd6, 0xb4, 0x8c, 0x33, 0x33, 0x5d, 0x6a, 0x35, 0x9c, 0xb0, 0xd3, - 0x24, 0x9e, 0x9e, 0x5b, 0xd3, 0xd6, 0xd3, 0xdc, 0x4c, 0x97, 0x5a, 0x6f, 0x01, 0x28, 0x9b, 0xd9, - 0x07, 0xd9, 0xc0, 0x5e, 0xe8, 0x34, 0x8c, 0x00, 0x44, 0xc4, 0xd2, 0xf3, 0x6b, 0xda, 0x7a, 0x8e, - 0x0f, 0xec, 0x85, 0xce, 0xd5, 0x08, 0x97, 0x07, 0x96, 0xf1, 0xf2, 0xbf, 0x34, 0xb4, 0x14, 0x31, - 0x62, 0xeb, 0xbe, 0x6b, 0x7b, 0xa3, 0xbe, 0xbb, 0xfe, 0x6c, 0x0a, 0xcd, 0xed, 0xd0, 0xe6, 0x2d, - 0xe2, 0x58, 0xb6, 0xd3, 0x1a, 0x93, 0x7f, 0x10, 0xf9, 0x8f, 0xd1, 0x39, 0xf3, 0xb5, 0xe8, 0x9c, - 0x3d, 0x35, 0x9d, 0x5f, 0x42, 0x39, 0xe8, 0x67, 0x74, 0x08, 0x04, 0x41, 0xbe, 0xb6, 0xdc, 0xeb, - 0x96, 0x16, 0x58, 0x03, 0xa3, 0x23, 0xfb, 0x2a, 0x2b, 0x20, 0x66, 0x6a, 0xd4, 0xc3, 0x77, 0x0d, - 0x93, 0x40, 0x00, 0x08, 0x53, 0x45, 0x1b, 0xc0, 0x65, 0x53, 0x65, 0xbc, 0xfc, 0x07, 0xce, 0x87, - 0x7a, 0xe8, 0x38, 0x63, 0x3e, 0x7c, 0x53, 0x7c, 0xb8, 0x8c, 0xf2, 0x0e, 0xb5, 0x08, 0x5f, 0xd8, - 0x6c, 0xec, 0x23, 0x06, 0x26, 0x56, 0x36, 0x17, 0x61, 0x43, 0xef, 0x89, 0x32, 0x89, 0xf2, 0xc3, - 0x91, 0x08, 0x3d, 0x25, 0x89, 0x7e, 0x9b, 0x41, 0x8b, 0x2c, 0x09, 0x71, 0x5a, 0x1e, 0xf1, 0xfd, - 0x6d, 0x67, 0x8f, 0x8e, 0x89, 0x34, 0x5a, 0x44, 0x42, 0xc3, 0x11, 0xa9, 0xf0, 0x74, 0x44, 0xc2, - 0x0f, 0xd0, 0x82, 0xcd, 0x49, 0xd4, 0x30, 0x2c, 0x8b, 0xfd, 0x9f, 0xf8, 0x7a, 0x7e, 0x6d, 0x72, - 0xbd, 0xb0, 0x51, 0x89, 0x6e, 0x47, 0x49, 0x96, 0x55, 0x04, 0x70, 0x35, 0xea, 0xb0, 0xe5, 0x04, - 0xde, 0x51, 0x6d, 0xb5, 0xd7, 0x2d, 0x15, 0xed, 0x84, 0x48, 0x1a, 0x78, 0x3e, 0x29, 0x2b, 0x1e, - 0xa2, 0xe5, 0x81, 0xaa, 0xf0, 0x73, 0x68, 0xf2, 0x90, 0x1c, 0x01, 0x87, 0xd3, 0xb5, 0x85, 0x5e, - 0xb7, 0x34, 0x73, 0x48, 0x8e, 0x24, 0x55, 0x4c, 0xca, 0x98, 0x78, 0xd7, 0x68, 0x87, 0x44, 0x50, - 0x17, 0x98, 0x08, 0x80, 0xcc, 0x44, 0x00, 0xae, 0xa4, 0x5e, 0xd5, 0xca, 0xff, 0x9e, 0x42, 0xfa, - 0x0e, 0x6d, 0xde, 0x71, 0x8c, 0x66, 0x9b, 0xdc, 0xa6, 0xbb, 0xe6, 0x3e, 0xb1, 0xc2, 0x36, 0x19, - 0xc7, 0xcd, 0x33, 0x90, 0x8d, 0x2a, 0x51, 0x96, 0x1b, 0x2a, 0xca, 0xf2, 0xcf, 0x70, 0x94, 0x95, - 0x1f, 0x66, 0xe1, 0xa6, 0x78, 0xdd, 0xb0, 0xdb, 0xe3, 0xfb, 0xcf, 0x7f, 0x83, 0x71, 0xef, 0x22, - 0x44, 0xee, 0xdb, 0x41, 0xc3, 0xa4, 0x16, 0xf1, 0xf5, 0x2c, 0xec, 0x57, 0xe5, 0x68, 0xbf, 0x92, - 0xdc, 0x5c, 0xd9, 0xba, 0x6f, 0x07, 0x9b, 0xac, 0x11, 0xdf, 0xa3, 0xce, 0x32, 0x4b, 0x48, 0x84, - 0xc5, 0x8a, 0x75, 0xad, 0x9e, 0xef, 0xc3, 0xc7, 0xf9, 0x9c, 0xfb, 0x3a, 0x7c, 0xce, 0x0f, 0xc5, - 0x67, 0x34, 0x14, 0x9f, 0x67, 0x86, 0xe3, 0xf3, 0xec, 0x53, 0x9e, 0x1a, 0x16, 0xc2, 0x26, 0x75, - 0x02, 0xc3, 0x76, 0x88, 0xd7, 0xf0, 0x03, 0x23, 0x08, 0xd9, 0xb1, 0x51, 0x80, 0x65, 0x58, 0x82, - 0x65, 0xd8, 0x8c, 0xc4, 0xbb, 0x20, 0xad, 0x95, 0x7a, 0xdd, 0xd2, 0x39, 0x53, 0x05, 0x95, 0xd3, - 0x61, 0xe1, 0x98, 0x10, 0xbf, 0x82, 0xd2, 0xa6, 0x11, 0xfa, 0x44, 0x9f, 0x5e, 0xd3, 0xd6, 0x67, - 0x37, 0x10, 0x57, 0xcc, 0x10, 0x4e, 0x66, 0x10, 0xca, 0x64, 0x06, 0xa0, 0x68, 0xa1, 0x59, 0x75, - 0xd5, 0xe5, 0xe3, 0x24, 0x7f, 0xba, 0xe3, 0x24, 0x7d, 0xe2, 0x71, 0xf2, 0xeb, 0x14, 0xc2, 0xec, - 0x5a, 0xe7, 0x11, 0x26, 0x1a, 0xf9, 0x4c, 0xfe, 0x15, 0x94, 0xf7, 0xc8, 0xfb, 0x21, 0xf1, 0x03, - 0xea, 0xc9, 0x51, 0xdd, 0x07, 0x65, 0x6e, 0xf6, 0xc1, 0xf2, 0x57, 0x93, 0xf0, 0x71, 0x59, 0x78, - 0x69, 0xbc, 0xf7, 0x0d, 0xde, 0xfb, 0x2e, 0xa2, 0x8c, 0x17, 0x3a, 0x71, 0x7a, 0x0a, 0xe6, 0x7a, - 0xa1, 0xa3, 0xfa, 0x03, 0x00, 0xbc, 0x8d, 0x16, 0x5c, 0xc1, 0xb9, 0xbb, 0x24, 0xfa, 0x76, 0xc9, - 0xcf, 0xdb, 0x0b, 0xbd, 0x6e, 0xe9, 0x6c, 0x2c, 0x4c, 0x7e, 0xbd, 0x9c, 
0x4b, 0x88, 0x12, 0xaa, - 0x84, 0x05, 0xb9, 0x41, 0xaa, 0xea, 0x09, 0x5b, 0xe6, 0x12, 0xa2, 0xf2, 0x16, 0xe4, 0x56, 0xd2, - 0xc6, 0xbb, 0x49, 0x3b, 0x2e, 0x64, 0x74, 0xb0, 0x16, 0x50, 0x61, 0x81, 0xc5, 0x9e, 0xe6, 0x93, - 0x03, 0x40, 0x9e, 0x1c, 0x00, 0xe5, 0x3f, 0x4e, 0x89, 0x5a, 0x84, 0x69, 0x12, 0x62, 0x8d, 0xe9, - 0x32, 0xbe, 0x1d, 0x0f, 0x75, 0x3b, 0xfe, 0x34, 0x0f, 0xb7, 0xe3, 0x3b, 0x81, 0xdd, 0xb6, 0x7d, - 0x28, 0x91, 0x8d, 0x89, 0xf4, 0x8d, 0x10, 0xe9, 0x63, 0x0d, 0x2d, 0xdf, 0x30, 0xee, 0xd7, 0x45, - 0x6d, 0xd1, 0xbf, 0x4e, 0xbd, 0x5b, 0xc4, 0xb3, 0xa9, 0x25, 0x52, 0xb2, 0xcb, 0x51, 0x4a, 0x96, - 0x5c, 0x8a, 0xca, 0xc0, 0x5e, 0x3c, 0x47, 0xbb, 0x20, 0xe6, 0x3a, 0x58, 0x73, 0x7d, 0x30, 0x3c, - 0xea, 0x57, 0x08, 0xfc, 0x53, 0x0d, 0xad, 0x04, 0x34, 0x30, 0xda, 0x0d, 0x33, 0xec, 0x84, 0x6d, - 0x03, 0xf6, 0xec, 0xd0, 0x37, 0x5a, 0x2c, 0x3d, 0x62, 0xbe, 0xde, 0x78, 0xa2, 0xaf, 0x6f, 0xb3, - 0x6e, 0x9b, 0xfd, 0x5e, 0x77, 0x58, 0x27, 0xee, 0xea, 0xf3, 0xc2, 0xd5, 0x4b, 0xc1, 0x80, 0x26, - 0xf5, 0x81, 0x68, 0xf1, 0x33, 0x0d, 0x15, 0x9f, 0xbc, 0x7a, 0xa7, 0xcb, 0xb5, 0x7e, 0x20, 0xe7, - 0x5a, 0x85, 0x8d, 0x4a, 0x85, 0x57, 0xae, 0x2b, 0x72, 0xe5, 0xba, 0xe2, 0x1e, 0xb6, 0x60, 0x4a, - 0x51, 0xe5, 0xba, 0xf2, 0x76, 0x68, 0x38, 0x81, 0x1d, 0x1c, 0x9d, 0x94, 0x9b, 0x15, 0x3f, 0xd5, - 0xd0, 0xd9, 0x27, 0x4e, 0xfa, 0x59, 0xb0, 0xb0, 0xfc, 0x15, 0x2f, 0xb9, 0xd6, 0x89, 0xeb, 0xd9, - 0xd4, 0xb3, 0x03, 0xfb, 0x83, 0x91, 0xcf, 0x20, 0x5f, 0x43, 0xd3, 0x0e, 0xb9, 0xd7, 0x10, 0x13, - 0x3e, 0x82, 0x6d, 0x4a, 0x83, 0x0b, 0xd9, 0xb2, 0x43, 0xee, 0xdd, 0x12, 0xb0, 0x64, 0x42, 0x41, - 0x82, 0xd5, 0xfc, 0x33, 0x73, 0xea, 0xfc, 0xf3, 0xcb, 0x14, 0xd4, 0x1f, 0x25, 0x3f, 0x8f, 0x7a, - 0x52, 0xf1, 0x3f, 0x71, 0xf3, 0x9f, 0xf9, 0x65, 0x68, 0xd3, 0x70, 0x4c, 0xd2, 0x6e, 0x8f, 0x2f, - 0x43, 0x03, 0xbd, 0xf4, 0x74, 0x9f, 0x38, 0xca, 0x0f, 0xf9, 0xbb, 0x1c, 0xe1, 0xd3, 0x51, 0xa7, - 0xed, 0xb7, 0xe2, 0xd2, 0xdf, 0x4f, 0x01, 0x4d, 0x6f, 0x13, 0xaf, 0x63, 0x3b, 0xc6, 0xf8, 0x3a, - 0xfa, 0x2c, 0x57, 0x63, 0xbf, 0x9d, 0xab, 0x82, 0x44, 0xa0, 0xdc, 0x29, 0x08, 0xf4, 0xa7, 0x14, - 0xd4, 0x6e, 0xef, 0xb8, 0xd6, 0xe8, 0xb3, 0x67, 0xc8, 0x88, 0x14, 0x0f, 0xec, 0x32, 0x27, 0x3e, - 0xb0, 0xfb, 0x68, 0x0e, 0x4d, 0x83, 0x07, 0x6f, 0x10, 0x9f, 0x25, 0x67, 0xf8, 0x26, 0xca, 0xfb, - 0xd1, 0x23, 0x44, 0xf0, 0x65, 0x61, 0x63, 0x25, 0xea, 0xaf, 0xbe, 0x4e, 0xe4, 0x86, 0xf4, 0x1b, - 0xc7, 0x86, 0xbc, 0x31, 0x51, 0x8f, 0x75, 0xe0, 0x4d, 0x94, 0x01, 0xaf, 0x58, 0x22, 0x89, 0x5b, - 0x8c, 0xb4, 0x49, 0x8f, 0xfa, 0xf8, 0x82, 0xf3, 0x66, 0x8a, 0x1e, 0xd1, 0x15, 0x5b, 0x68, 0xce, - 0x8a, 0x1e, 0xc6, 0x35, 0xf6, 0x68, 0xe8, 0x58, 0xfa, 0x3c, 0x68, 0x3b, 0x17, 0x69, 0x1b, 0xf0, - 0x6e, 0xae, 0x76, 0xbe, 0xd7, 0x2d, 0xe9, 0x96, 0x22, 0x50, 0xb4, 0xcf, 0xaa, 0x32, 0x66, 0x6a, - 0x1b, 0x9e, 0x91, 0xc1, 0x1a, 0x4b, 0xa6, 0x4a, 0x8f, 0xcb, 0xb8, 0xa9, 0xbc, 0x99, 0x6a, 0x2a, - 0xc7, 0xf0, 0x7b, 0x68, 0x16, 0xfe, 0xd5, 0xf0, 0xc4, 0x4b, 0xab, 0x3e, 0x07, 0x64, 0x65, 0xca, - 0x33, 0x2c, 0xfe, 0xde, 0xad, 0x2d, 0xe3, 0x8a, 0xea, 0x19, 0x45, 0x84, 0xdf, 0x45, 0x1c, 0x68, - 0x10, 0xfe, 0x72, 0x47, 0xbc, 0xa3, 0x3c, 0xab, 0x0c, 0x20, 0xbf, 0xea, 0xe1, 0x91, 0xd8, 0x96, - 0x60, 0x45, 0xfd, 0xb4, 0x2c, 0xc1, 0xaf, 0xa3, 0xac, 0xcb, 0x5f, 0xc9, 0x08, 0xfa, 0x2c, 0x45, - 0x7a, 0xe5, 0xc7, 0x33, 0x62, 0x4f, 0xe0, 0x88, 0xa2, 0x2d, 0xea, 0xcd, 0x14, 0x79, 0xfc, 0x79, - 0x05, 0x6c, 0x3e, 0x92, 0x22, 0xf9, 0xd5, 0x05, 0x57, 0x24, 0x1a, 0xaa, 0x8a, 0x04, 0x88, 0x3b, - 0x08, 0x87, 0x50, 0x2f, 0x6c, 0x04, 0xb4, 0xe1, 0x8b, 0x8a, 0x21, 0xec, 0x14, 0x85, 0x8d, 0x0b, - 
0xfd, 0xfb, 0xd6, 0xa0, 0x8a, 0x22, 0xaf, 0x86, 0x86, 0x09, 0x91, 0x32, 0xca, 0x7c, 0x52, 0xca, - 0x58, 0xb0, 0x07, 0x9f, 0xd0, 0x60, 0xf7, 0x93, 0x58, 0x20, 0x7d, 0x58, 0xe3, 0x2c, 0xe0, 0xcd, - 0x54, 0x16, 0x70, 0x8c, 0x87, 0x91, 0xf8, 0x7e, 0x06, 0xdb, 0xa1, 0x12, 0x46, 0xf2, 0x87, 0xb5, - 0x28, 0x8c, 0x04, 0x96, 0x0c, 0x23, 0x01, 0xe3, 0x06, 0x9a, 0xf1, 0xe4, 0xfc, 0x19, 0xae, 0xae, - 0x12, 0xab, 0x8e, 0x27, 0xd7, 0x9c, 0x55, 0x4a, 0x27, 0x95, 0x55, 0x8a, 0x08, 0xef, 0x22, 0x64, - 0xf6, 0x33, 0x47, 0xf8, 0xd8, 0x5f, 0xd8, 0x38, 0x13, 0x69, 0x4f, 0xe4, 0x94, 0x35, 0x9d, 0x5d, - 0x57, 0xe3, 0xe6, 0x8a, 0x5e, 0x49, 0x0d, 0x73, 0x83, 0x19, 0xa5, 0x4e, 0x50, 0x16, 0x91, 0xdc, - 0xa0, 0xe6, 0x54, 0xe2, 0x4c, 0x8c, 0x30, 0xd5, 0x0d, 0x7d, 0x98, 0x59, 0x19, 0xf4, 0x13, 0x07, - 0xa8, 0x98, 0x48, 0x56, 0x26, 0x52, 0x0a, 0x6e, 0x65, 0xdc, 0x5c, 0xb5, 0x32, 0xc6, 0xf1, 0x3b, - 0xa8, 0x10, 0xc6, 0xd7, 0x75, 0x7d, 0x0e, 0xb4, 0xea, 0x4f, 0xba, 0xc9, 0xf3, 0x34, 0x5e, 0xea, - 0xa0, 0xe8, 0x95, 0x35, 0xe1, 0xef, 0xa3, 0xe9, 0xa8, 0xae, 0x6f, 0x3b, 0x7b, 0x54, 0x5f, 0x50, - 0x35, 0x27, 0x4b, 0xfa, 0x5c, 0xb3, 0x1d, 0xa3, 0xaa, 0x66, 0x49, 0x80, 0x4d, 0x34, 0xeb, 0x29, - 0xd7, 0x56, 0x1d, 0xab, 0xfb, 0xe1, 0x80, 0x4b, 0x2d, 0xdf, 0x0f, 0xd5, 0x6e, 0xea, 0x7e, 0xa8, - 0xca, 0x58, 0x04, 0x87, 0xfc, 0x90, 0xd5, 0x17, 0xd5, 0x08, 0x96, 0xcf, 0x5e, 0x1e, 0xc1, 0xa2, - 0xa1, 0x1a, 0xc1, 0x02, 0xc4, 0x87, 0x48, 0xc4, 0x4a, 0xfc, 0x41, 0x5a, 0x5f, 0x52, 0xe3, 0x77, - 0xe0, 0x57, 0x6b, 0x1e, 0xbf, 0xc9, 0xae, 0x6a, 0xfc, 0x26, 0xa5, 0x8c, 0x73, 0x6e, 0x54, 0xe9, - 0xd0, 0x97, 0x55, 0xce, 0xa9, 0x25, 0x10, 0x91, 0x0e, 0x45, 0x98, 0xca, 0xb9, 0x3e, 0xcc, 0x38, - 0xe7, 0xf6, 0x0b, 0x4c, 0xfa, 0x8a, 0xca, 0xb9, 0x44, 0xe9, 0x89, 0x73, 0x2e, 0x6e, 0xae, 0x72, - 0x2e, 0xc6, 0x6b, 0x39, 0x94, 0x81, 0xaf, 0xed, 0x7e, 0xf9, 0xc7, 0x29, 0x34, 0x97, 0x28, 0xd4, - 0xe1, 0xff, 0x43, 0x53, 0x90, 0x7f, 0xf1, 0x64, 0x06, 0xf7, 0xba, 0xa5, 0x59, 0x47, 0x4d, 0xbe, - 0x40, 0x8e, 0x37, 0x50, 0x2e, 0x2a, 0x98, 0x8a, 0x8a, 0x19, 0x24, 0x32, 0x11, 0x26, 0x27, 0x32, - 0x11, 0x86, 0xab, 0x28, 0xdb, 0xe1, 0x87, 0xbd, 0x48, 0x65, 0x60, 0xfd, 0x04, 0x24, 0xa7, 0x77, - 0x02, 0x92, 0xb2, 0xb3, 0xa9, 0x53, 0x14, 0x85, 0xfb, 0xf5, 0xc2, 0xf4, 0xd3, 0xd4, 0x0b, 0xcb, - 0x1f, 0x20, 0x0c, 0x0e, 0xdc, 0x0d, 0x3c, 0x62, 0x74, 0xa2, 0x6c, 0x64, 0x0d, 0xa5, 0xfa, 0x29, - 0xdd, 0x7c, 0xaf, 0x5b, 0x9a, 0xb6, 0xe5, 0xe4, 0x2c, 0x65, 0x5b, 0xb8, 0x16, 0xcf, 0x86, 0xe7, - 0x17, 0x0b, 0x30, 0xa0, 0x9c, 0xd3, 0x9c, 0x34, 0xc1, 0xf2, 0x4f, 0x26, 0xd1, 0xcc, 0x0e, 0xe4, - 0x79, 0x75, 0x9e, 0x41, 0x9d, 0x62, 0xdc, 0x17, 0x50, 0xfa, 0x9e, 0x11, 0x98, 0xfb, 0x30, 0x6a, - 0x8e, 0x4f, 0x0d, 0x00, 0x79, 0x6a, 0x00, 0xe0, 0x4d, 0x34, 0xb7, 0xe7, 0xd1, 0x4e, 0x43, 0x0c, - 0xc7, 0x92, 0xce, 0xc9, 0xf8, 0x99, 0x3b, 0x13, 0x09, 0x43, 0xd5, 0x67, 0xee, 0x8a, 0x20, 0x4e, - 0x3f, 0xa7, 0x4e, 0x4c, 0x3f, 0xaf, 0xa1, 0x59, 0xe2, 0x79, 0xd4, 0xdb, 0xde, 0xbb, 0x61, 0xfb, - 0x3e, 0xe3, 0x6c, 0x1a, 0x6c, 0x84, 0xf0, 0x57, 0x25, 0x52, 0xe7, 0x44, 0x1f, 0xfc, 0x1a, 0x9a, - 0xde, 0xa3, 0x9e, 0x49, 0x1a, 0x6d, 0xd2, 0x32, 0xcc, 0x23, 0x48, 0x06, 0x72, 0x7c, 0x87, 0x02, - 0xfc, 0x4d, 0x80, 0xe5, 0x4f, 0x18, 0x12, 0x8c, 0x2f, 0xa3, 0x3c, 0xef, 0xed, 0x90, 0x7b, 0x70, - 0xfc, 0xe7, 0x38, 0x33, 0x01, 0x7c, 0x8b, 0xdc, 0x93, 0x99, 0x19, 0x61, 0xe5, 0x5f, 0xa4, 0xd0, - 0xf4, 0x3b, 0xcc, 0x65, 0xd1, 0x32, 0xf4, 0x27, 0xad, 0x9d, 0x38, 0xe9, 0xe1, 0x92, 0xfa, 0x4b, - 0x28, 0x0b, 0x4b, 0xd3, 0x5f, 0x12, 0x7e, 0xae, 0x7b, 0xb4, 0xa3, 0x74, 0xc8, 0x70, 0xe4, 0x98, - 0x4f, 0xa6, 0x86, 0xf7, 
0x49, 0xfa, 0x74, 0x3e, 0xb9, 0xf8, 0x5d, 0x94, 0x86, 0xe0, 0xc1, 0x79, - 0x94, 0xde, 0x62, 0x2b, 0x34, 0x3f, 0x81, 0x0b, 0x28, 0xbb, 0x75, 0xd7, 0x36, 0x03, 0x62, 0xcd, - 0x6b, 0x38, 0x8b, 0x26, 0x6f, 0xde, 0xbc, 0x31, 0x9f, 0xc2, 0x4b, 0x68, 0xfe, 0x1a, 0x31, 0xac, - 0xb6, 0xed, 0x90, 0xad, 0xfb, 0x3c, 0x6b, 0x98, 0x9f, 0xdc, 0xf8, 0x87, 0x86, 0xd2, 0xfc, 0x8a, - 0x44, 0xd0, 0xdc, 0xeb, 0x24, 0xe0, 0x3c, 0x07, 0xc4, 0xc7, 0xb8, 0x9f, 0x92, 0xf4, 0xa9, 0x5f, - 0x3c, 0x13, 0xc7, 0x8f, 0x12, 0x8b, 0xe5, 0xe7, 0x7e, 0xf4, 0x97, 0x2f, 0x7f, 0x99, 0xba, 0x50, - 0xd6, 0xab, 0x77, 0xff, 0xbf, 0x7a, 0x40, 0x9b, 0x97, 0x7c, 0x12, 0x54, 0x1f, 0x80, 0xf7, 0x3f, - 0xac, 0x3e, 0xb0, 0xad, 0x0f, 0xaf, 0x68, 0x17, 0x5f, 0xd2, 0xf0, 0x15, 0x94, 0x86, 0x35, 0xc4, - 0x3c, 0x10, 0xe5, 0xf5, 0x7c, 0xb2, 0xee, 0xc9, 0x8f, 0x53, 0x1a, 0xf4, 0xcd, 0xbc, 0x01, 0x7f, - 0xb5, 0x85, 0x57, 0x8e, 0xdd, 0x98, 0xb6, 0x98, 0x63, 0x8a, 0xfc, 0xec, 0xe4, 0x8d, 0x36, 0xf7, - 0x89, 0x79, 0x58, 0x27, 0xbe, 0x4b, 0x1d, 0x9f, 0xd4, 0xde, 0xfb, 0xe2, 0xef, 0xab, 0x13, 0x1f, - 0x3d, 0x5a, 0xd5, 0x3e, 0x7f, 0xb4, 0xaa, 0x3d, 0x7c, 0xb4, 0xaa, 0xfd, 0xed, 0xd1, 0xaa, 0xf6, - 0xc9, 0xe3, 0xd5, 0x89, 0x87, 0x8f, 0x57, 0x27, 0xbe, 0x78, 0xbc, 0x3a, 0xf1, 0xc3, 0xe7, 0xa5, - 0x3f, 0xf3, 0x32, 0xbc, 0x8e, 0x61, 0x19, 0xae, 0x47, 0x0f, 0x88, 0x19, 0x88, 0x5f, 0xd1, 0x5f, - 0x69, 0xfd, 0x26, 0xb5, 0x74, 0x15, 0x80, 0x5b, 0x5c, 0x5c, 0xd9, 0xa6, 0x95, 0xab, 0xae, 0xdd, - 0xcc, 0x80, 0x2d, 0x97, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x2f, 0x05, 0xeb, 0xb3, 0x36, - 0x00, 0x00, + // 2337 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 0x52, 0xe2, 0xaf, 0xa1, 0x7e, 0x8e, 0x7e, 0x78, 0x4d, 0xdb, 0xa2, 0xc0, 0x00, 0xdf, + 0x28, 0x46, 0x4c, 0xe6, 0x2b, 0x27, 0x45, 0x60, 0x14, 0x28, 0x4c, 0x45, 0x49, 0x24, 0xc4, 0xb5, + 0x43, 0xd9, 0x48, 0x5b, 0x04, 0x60, 0x96, 0xbb, 0x23, 0x6a, 0x25, 0x72, 0x67, 0xb3, 0x3b, 0x6b, + 0x5b, 0x31, 0x02, 0x14, 0x2d, 0x50, 0xe4, 0x52, 0x34, 0x68, 0x7b, 0x6e, 0x02, 0xf4, 0xd6, 0x53, + 0xff, 0x82, 0x1e, 0x8a, 0x1e, 0x72, 0x74, 0xd1, 0x4b, 0x4e, 0x6c, 0x6b, 0x3b, 0x45, 0xc1, 0x43, + 0xef, 0xbd, 0x15, 0xf3, 0x66, 0x76, 0x39, 0xb3, 0xa6, 0x61, 0x59, 0x69, 0x52, 0x43, 0xe0, 0x25, + 0x31, 0x3f, 0x6f, 0xde, 0x9b, 0x37, 0x6f, 0x3e, 0x3b, 0xef, 0xcd, 0x0f, 0xa1, 0x45, 0xff, 0xb0, + 0x53, 0xb7, 0x7c, 0xb7, 0x4e, 0x6e, 0x13, 0x8f, 0xd5, 0xfc, 0x80, 0x32, 0x8a, 0x27, 0x2d, 0xdf, + 0x2d, 0x57, 0x3a, 0x94, 0x76, 0xba, 0xa4, 0x0e, 0x50, 0x3b, 0xda, 0xab, 0x33, 0xb7, 0x47, 0x42, + 0x66, 0xf5, 0x7c, 0xd1, 0xaa, 0xbc, 0x14, 0xab, 0x86, 0x51, 0xbb, 0xe7, 0xb2, 0x34, 0xba, 0x4f, + 0xac, 0x2e, 0xdb, 0x97, 0xe8, 0xb9, 0xb4, 0x31, 0xd2, 0xf3, 0xd9, 0x91, 0x14, 0x5e, 0xea, 0xb8, + 0x6c, 0x3f, 0x6a, 0xd7, 0x6c, 0xda, 0xab, 0x77, 0x68, 0x87, 0x0e, 0x5b, 0xf1, 0x5f, 0xf0, 0x03, + 0xfe, 0x25, 0x9b, 0x9f, 0x97, 0xb6, 0x78, 0x27, 0x96, 0xe7, 0x51, 0x66, 0x31, 0x97, 0x7a, 0xa1, + 0x94, 0xbe, 0x7a, 0xf8, 0x7a, 0x58, 0x73, 0x29, 0x97, 0xf6, 0x2c, 0x7b, 0xdf, 0xf5, 0x48, 0x70, + 0x54, 0x8f, 0x7d, 0x0a, 0x48, 0x48, 0xa3, 0xc0, 0x26, 0xf5, 0x0e, 0xf1, 0x48, 0x60, 0x31, 0xe2, + 0x08, 0xad, 0xea, 0xaf, 0x33, 0x68, 0x61, 0x87, 0xb6, 0x77, 0x61, 0x24, 0x8c, 0x38, 0x5b, 0x3c, + 0x1a, 0xf8, 0x22, 0xca, 0x1d, 0xd0, 0x76, 0xcb, 0x75, 0x4c, 0x63, 0xcd, 0x58, 0x2f, 0x36, 0x16, + 0x07, 0xfd, 0xca, 0xdc, 0x01, 0x6d, 0x6f, 0x3b, 0x2f, 0xd3, 0x9e, 0xcb, 0x60, 0x0c, 0xcd, 0x2c, + 0x00, 0xf8, 0x55, 0x84, 0x78, 0xdb, 0x90, 0x30, 0xde, 0x3e, 0x03, 0xed, 0x57, 0x06, 
0xfd, 0x0a, + 0x3e, 0xa0, 0xed, 0x5d, 0xc2, 0x34, 0x95, 0x42, 0x8c, 0xe1, 0x97, 0x50, 0xf6, 0xc3, 0x88, 0x44, + 0xc4, 0x9c, 0x1c, 0x76, 0x00, 0x80, 0xda, 0x01, 0x00, 0x78, 0x1b, 0xe5, 0xed, 0x80, 0x70, 0x9f, + 0xcd, 0xa9, 0x35, 0x63, 0xbd, 0xb4, 0x51, 0xae, 0x89, 0x40, 0xd4, 0xe2, 0x70, 0xd5, 0x6e, 0xc6, + 0x33, 0xd4, 0x58, 0xfc, 0xa2, 0x5f, 0x99, 0x18, 0xf4, 0x2b, 0xb1, 0xca, 0xa7, 0x7f, 0xad, 0x18, + 0xcd, 0xf8, 0x07, 0x7e, 0x11, 0x4d, 0x1e, 0xd0, 0xb6, 0x99, 0x05, 0x33, 0x85, 0x9a, 0xe5, 0xbb, + 0xb5, 0x1d, 0xda, 0x6e, 0x94, 0xa4, 0x12, 0x17, 0x36, 0xf9, 0x7f, 0xaa, 0xff, 0x34, 0xd0, 0xec, + 0x0e, 0x6d, 0xbf, 0xcb, 0x1d, 0x38, 0xdd, 0x31, 0xa9, 0x7e, 0x9e, 0x81, 0xa1, 0xbe, 0x43, 0xac, + 0xf0, 0xb4, 0x4f, 0xff, 0x77, 0x10, 0xb2, 0xbb, 0x51, 0xc8, 0x48, 0xc0, 0x7d, 0xcd, 0x42, 0xd7, + 0x67, 0x06, 0xfd, 0xca, 0xa2, 0x44, 0x35, 0x67, 0x8b, 0x09, 0x58, 0xfd, 0xc5, 0x14, 0x5a, 0x8e, + 0x43, 0xd4, 0x24, 0x2c, 0x0a, 0xbc, 0x71, 0xa4, 0x46, 0x46, 0x0a, 0xbf, 0x8c, 0x72, 0x01, 0xb1, + 0x42, 0xea, 0x99, 0x39, 0xd0, 0x59, 0x1a, 0xf4, 0x2b, 0xf3, 0x02, 0x51, 0x14, 0x64, 0x1b, 0xfc, + 0x3d, 0x34, 0x73, 0x18, 0xb5, 0x49, 0xe0, 0x11, 0x46, 0x42, 0xde, 0x51, 0x1e, 0x94, 0xca, 0x83, + 0x7e, 0x65, 0x65, 0x28, 0xd0, 0xfa, 0x9a, 0x56, 0x71, 0xee, 0xa6, 0x4f, 0x9d, 0x96, 0x17, 0xf5, + 0xda, 0x24, 0x30, 0x0b, 0x6b, 0xc6, 0x7a, 0x56, 0xb8, 0xe9, 0x53, 0xe7, 0xfb, 0x00, 0xaa, 0x6e, + 0x26, 0x20, 0xef, 0x38, 0x88, 0xbc, 0x96, 0xc5, 0x40, 0x44, 0x1c, 0xb3, 0xb8, 0x66, 0xac, 0x17, + 0x44, 0xc7, 0x41, 0xe4, 0x5d, 0x8d, 0x71, 0xb5, 0x63, 0x15, 0xaf, 0xfe, 0xcb, 0x40, 0x4b, 0x31, + 0x23, 0xb6, 0xee, 0xfa, 0x6e, 0x70, 0xda, 0x57, 0x89, 0x9f, 0x4f, 0xa1, 0xb9, 0x1d, 0xda, 0xbe, + 0x41, 0x3c, 0xc7, 0xf5, 0x3a, 0x63, 0xf2, 0x8f, 0x22, 0xff, 0x63, 0x74, 0xce, 0x7d, 0x2d, 0x3a, + 0xe7, 0x8f, 0x4d, 0xe7, 0x57, 0x50, 0x01, 0xf4, 0xac, 0x1e, 0x81, 0x8f, 0xa0, 0xd8, 0x58, 0x1e, + 0xf4, 0x2b, 0x0b, 0xbc, 0x81, 0xd5, 0x53, 0x63, 0x95, 0x97, 0x10, 0x77, 0x35, 0xd6, 0x08, 0x7d, + 0xcb, 0x26, 0xf0, 0x01, 0x48, 0x57, 0x65, 0x1b, 0xc0, 0x55, 0x57, 0x55, 0xbc, 0xfa, 0x47, 0xc1, + 0x87, 0x66, 0xe4, 0x79, 0x63, 0x3e, 0x7c, 0x53, 0x7c, 0xb8, 0x8c, 0x8a, 0x1e, 0x75, 0x88, 0x98, + 0xd8, 0xfc, 0x30, 0x46, 0x1c, 0x4c, 0xcd, 0x6c, 0x21, 0xc6, 0x4e, 0xbc, 0x26, 0xaa, 0x24, 0x2a, + 0x9e, 0x8c, 0x44, 0xe8, 0x19, 0x49, 0xf4, 0xfb, 0x1c, 0x5a, 0xdc, 0xa1, 0xed, 0x6d, 0xaf, 0x13, + 0x90, 0x30, 0xdc, 0xf6, 0xf6, 0xe8, 0x98, 0x48, 0xa7, 0x8b, 0x48, 0xe8, 0x64, 0x44, 0x2a, 0x3d, + 0x1b, 0x91, 0xf0, 0x3d, 0xb4, 0xe0, 0x0a, 0x12, 0xb5, 0x2c, 0xc7, 0xe1, 0xff, 0x27, 0xa1, 0x59, + 0x5c, 0x9b, 0x5c, 0x2f, 0x6d, 0xd4, 0xe2, 0x2a, 0x3f, 0xcd, 0xb2, 0x9a, 0x04, 0xae, 0xc6, 0x0a, + 0x5b, 0x1e, 0x0b, 0x8e, 0x1a, 0xab, 0x83, 0x7e, 0xa5, 0xec, 0xa6, 0x44, 0x4a, 0xc7, 0xf3, 0x69, + 0x59, 0xf9, 0x10, 0x2d, 0x8f, 0x34, 0x85, 0x5f, 0x40, 0x93, 0x87, 0xe4, 0x08, 0x38, 0x9c, 0x6d, + 0x2c, 0x0c, 0xfa, 0x95, 0x99, 0x43, 0x72, 0xa4, 0x98, 0xe2, 0x52, 0xce, 0xc4, 0xdb, 0x56, 0x37, + 0x22, 0x92, 0xba, 0xc0, 0x44, 0x00, 0x54, 0x26, 0x02, 0x70, 0x25, 0xf3, 0xba, 0x51, 0xfd, 0xf7, + 0x14, 0x32, 0x77, 0x68, 0xfb, 0x96, 0x67, 0xb5, 0xbb, 0xe4, 0x26, 0xdd, 0xb5, 0xf7, 0x89, 0x13, + 0x75, 0xc9, 0xf8, 0xbb, 0x79, 0x0e, 0xaa, 0x51, 0xed, 0x2b, 0x2b, 0x9c, 0xe8, 0x2b, 0x2b, 0x3e, + 0xc7, 0x5f, 0x59, 0xf5, 0x7e, 0x1e, 0x76, 0x8a, 0x6f, 0x5a, 0x6e, 0x77, 0xbc, 0xff, 0xf9, 0x6f, + 0x30, 0xee, 0x7d, 0x84, 0xc8, 0x5d, 0x97, 0xb5, 0x6c, 0xea, 0x90, 0xd0, 0xcc, 0xc3, 0x7a, 0x55, + 0x8d, 0xd7, 0x2b, 0x25, 0xcc, 0xb5, 0xad, 0xbb, 0x2e, 0xdb, 0xe4, 0x8d, 0xc4, 0x1a, 0x75, 0x96, + 0x7b, 0x42, 
0x62, 0x6c, 0x68, 0xd8, 0x34, 0x9a, 0xc5, 0x04, 0x7e, 0x9c, 0xcf, 0x85, 0xaf, 0xc3, + 0xe7, 0xe2, 0x89, 0xf8, 0x8c, 0x4e, 0xc4, 0xe7, 0x99, 0x93, 0xf1, 0x79, 0xf6, 0x19, 0xb3, 0x86, + 0x83, 0xb0, 0x4d, 0x3d, 0x66, 0xb9, 0x1e, 0x09, 0x5a, 0x21, 0xb3, 0x58, 0xc4, 0xd3, 0x46, 0x09, + 0xa6, 0x61, 0x09, 0xa6, 0x61, 0x33, 0x16, 0xef, 0x82, 0xb4, 0x51, 0x19, 0xf4, 0x2b, 0xe7, 0x6c, + 0x1d, 0xd4, 0xb2, 0xc3, 0xc2, 0x63, 0x42, 0xfc, 0x1a, 0xca, 0xda, 0x56, 0x14, 0x12, 0x73, 0x7a, + 0xcd, 0x58, 0x9f, 0xdd, 0x40, 0xc2, 0x30, 0x47, 0x04, 0x99, 0x41, 0xa8, 0x92, 0x19, 0x80, 0xb2, + 0x83, 0x66, 0xf5, 0x59, 0x57, 0xd3, 0x49, 0xf1, 0x78, 0xe9, 0x24, 0xfb, 0xd4, 0x74, 0xf2, 0xdb, + 0x0c, 0xc2, 0x7c, 0x5b, 0x17, 0x10, 0x2e, 0x3a, 0xf5, 0x95, 0xfc, 0x6b, 0xa8, 0x18, 0x90, 0x0f, + 0x23, 0x12, 0x32, 0x1a, 0xa8, 0x5f, 0x75, 0x02, 0xaa, 0xdc, 0x4c, 0xc0, 0xea, 0x57, 0x93, 0x70, + 0x48, 0x2a, 0xa3, 0x34, 0x5e, 0xfb, 0x46, 0xaf, 0x7d, 0x17, 0x51, 0x2e, 0x88, 0xbc, 0x61, 0x79, + 0x0a, 0xee, 0x06, 0x91, 0xa7, 0xc7, 0x03, 0x00, 0xbc, 0x8d, 0x16, 0x7c, 0xc9, 0xb9, 0xdb, 0xa4, + 0x25, 0xc3, 0x28, 0xf2, 0xed, 0x85, 0x41, 0xbf, 0x72, 0x76, 0x28, 0xdc, 0x49, 0x05, 0x74, 0x2e, + 0x25, 0x4a, 0x99, 0x92, 0x1e, 0x14, 0x46, 0x99, 0x6a, 0xa6, 0x7c, 0x99, 0x4b, 0x89, 0xaa, 0x7f, + 0x9a, 0x92, 0x87, 0xe1, 0xb6, 0x4d, 0x88, 0x33, 0x9e, 0xe7, 0xf1, 0xb6, 0xf6, 0x44, 0xdb, 0xda, + 0xcf, 0x8a, 0xb0, 0xad, 0xbd, 0xc5, 0xdc, 0xae, 0x1b, 0xc2, 0x1d, 0xcd, 0x98, 0x48, 0xdf, 0x08, + 0x91, 0x3e, 0x31, 0xd0, 0xf2, 0x35, 0xeb, 0x6e, 0x53, 0x5e, 0x6e, 0x85, 0x6f, 0xd2, 0xe0, 0x06, + 0x09, 0x5c, 0xea, 0xc8, 0x5a, 0xea, 0x72, 0x5c, 0x4b, 0xa5, 0xa7, 0xa2, 0x36, 0x52, 0x4b, 0x14, + 0x57, 0x17, 0xe4, 0x58, 0x47, 0x5b, 0x6e, 0x8e, 0x86, 0x4f, 0x7b, 0xed, 0x8f, 0x7f, 0x66, 0xa0, + 0x15, 0x46, 0x99, 0xd5, 0x6d, 0xd9, 0x51, 0x2f, 0xea, 0x5a, 0xb0, 0xd8, 0x46, 0xa1, 0xd5, 0xe1, + 0x75, 0x0d, 0x8f, 0xf5, 0xc6, 0x13, 0x63, 0x7d, 0x93, 0xab, 0x6d, 0x26, 0x5a, 0xb7, 0xb8, 0x92, + 0x08, 0xf5, 0x79, 0x19, 0xea, 0x25, 0x36, 0xa2, 0x49, 0x73, 0x24, 0x5a, 0xfe, 0xdc, 0x40, 0xe5, + 0x27, 0xcf, 0xde, 0xf1, 0x8a, 0xa4, 0x1f, 0xaa, 0x45, 0x52, 0x69, 0xa3, 0x56, 0x13, 0x57, 0xa7, + 0x35, 0xf5, 0xea, 0xb4, 0xe6, 0x1f, 0x76, 0x60, 0x48, 0xf1, 0xd5, 0x69, 0xed, 0xdd, 0xc8, 0xf2, + 0x98, 0xcb, 0x8e, 0x9e, 0x56, 0x54, 0x95, 0x3f, 0x33, 0xd0, 0xd9, 0x27, 0x0e, 0xfa, 0x79, 0xf0, + 0xb0, 0xfa, 0x55, 0x06, 0xad, 0xec, 0xd0, 0x76, 0x93, 0xf8, 0x81, 0x4b, 0x03, 0x97, 0xb9, 0x1f, + 0x9d, 0xfa, 0xd2, 0xef, 0xbb, 0x68, 0xda, 0x23, 0x77, 0x5a, 0x72, 0xc0, 0x47, 0xb0, 0x4c, 0x19, + 0xb0, 0x93, 0x5a, 0xf6, 0xc8, 0x9d, 0x1b, 0x12, 0x56, 0x5c, 0x28, 0x29, 0xb0, 0x5e, 0x38, 0xe6, + 0x8e, 0x5d, 0x38, 0x3e, 0xca, 0xc0, 0xc5, 0xa1, 0x12, 0xe7, 0xd3, 0x5e, 0x54, 0xfc, 0x4f, 0xc2, + 0xfc, 0x67, 0xb1, 0x8b, 0xd9, 0xb4, 0x3c, 0x9b, 0x74, 0xbb, 0xe3, 0x5d, 0xcc, 0xc8, 0x28, 0x3d, + 0xdb, 0xd9, 0x44, 0xf5, 0xbe, 0x78, 0x18, 0x22, 0x63, 0x7a, 0xda, 0x69, 0xfb, 0xad, 0x84, 0xf4, + 0x0f, 0x53, 0x40, 0xd3, 0x9b, 0x24, 0xe8, 0xb9, 0x9e, 0x35, 0xde, 0x47, 0x3e, 0xcf, 0xd7, 0xa8, + 0xdf, 0xce, 0x56, 0x41, 0x21, 0x50, 0xe1, 0x18, 0x04, 0x7a, 0x34, 0x8d, 0xa6, 0x81, 0x33, 0xd7, + 0x48, 0xc8, 0x0b, 0x0a, 0x7c, 0x1d, 0x15, 0xc3, 0xf8, 0xe5, 0x16, 0xb0, 0xa7, 0xb4, 0xb1, 0x12, + 0xd7, 0x61, 0xfa, 0x93, 0x2e, 0x11, 0x80, 0xa4, 0xf1, 0xd0, 0xf8, 0xdb, 0x13, 0xcd, 0xa1, 0x0d, + 0xbc, 0x89, 0x72, 0xc0, 0x03, 0x47, 0x16, 0x1e, 0x8b, 0xb1, 0x35, 0xe5, 0x25, 0x94, 0x70, 0x52, + 0x34, 0xd3, 0xec, 0x48, 0x55, 0x6e, 0xa4, 0x0b, 0xaf, 0x89, 0x80, 0x6f, 0x8a, 0x11, 0xe5, 0x8d, + 0x91, 0x30, 0x22, 0x9a, 0xe9, 0x46, 
0x04, 0x86, 0x3f, 0x40, 0xb3, 0xf0, 0xaf, 0x56, 0x20, 0x1f, + 0xdc, 0x24, 0x7c, 0x54, 0x8d, 0x69, 0xaf, 0x71, 0x1a, 0xe7, 0x06, 0xfd, 0xca, 0x99, 0xae, 0x8a, + 0x6b, 0xa6, 0x67, 0x34, 0x11, 0x7e, 0x1f, 0x09, 0xa0, 0x45, 0xc4, 0x03, 0x0e, 0xf9, 0x2c, 0xec, + 0xac, 0xd6, 0x81, 0xfa, 0xb8, 0x43, 0xcc, 0x6b, 0x57, 0x81, 0x35, 0xf3, 0xd3, 0xaa, 0x04, 0xbf, + 0x85, 0xf2, 0xbe, 0x78, 0x2c, 0x01, 0xfc, 0x8d, 0x4f, 0x14, 0x53, 0x6f, 0x28, 0x24, 0xc3, 0x04, + 0xa2, 0x59, 0x8b, 0xb5, 0xb9, 0xa1, 0x40, 0xdc, 0xb2, 0x03, 0x95, 0x15, 0x43, 0xea, 0xe5, 0xbb, + 0x30, 0x24, 0x1b, 0xea, 0x86, 0x24, 0x88, 0x7b, 0x08, 0x47, 0x70, 0x6d, 0xd4, 0x62, 0xb4, 0x15, + 0xca, 0x8b, 0x23, 0xe0, 0x5d, 0x69, 0xe3, 0x42, 0x52, 0xbd, 0x8f, 0xba, 0x58, 0x12, 0x97, 0x62, + 0x51, 0x4a, 0xa4, 0xf5, 0x32, 0x9f, 0x96, 0x72, 0x16, 0xec, 0xc1, 0x11, 0x36, 0x7c, 0x4b, 0x0a, + 0x0b, 0x94, 0x83, 0x6d, 0xc1, 0x02, 0xd1, 0x4c, 0x67, 0x81, 0xc0, 0x04, 0xc1, 0xe5, 0x69, 0x0c, + 0x7c, 0x5c, 0x1a, 0xc1, 0xd5, 0x63, 0x9a, 0x98, 0xe0, 0x12, 0x4b, 0x13, 0x5c, 0xc2, 0xb8, 0x85, + 0x66, 0x02, 0xb5, 0x1a, 0x83, 0x8d, 0x90, 0xc2, 0xaa, 0xc7, 0x4b, 0x35, 0xc1, 0x2a, 0x4d, 0x49, + 0x67, 0x95, 0x26, 0xc2, 0xbb, 0x08, 0xd9, 0x49, 0x1d, 0x02, 0x67, 0xbe, 0xa5, 0x8d, 0x33, 0xb1, + 0xf5, 0x54, 0x85, 0xd2, 0x30, 0xf9, 0xe6, 0x67, 0xd8, 0x5c, 0xb3, 0xab, 0x98, 0xe1, 0x61, 0xb0, + 0xe3, 0x44, 0x0c, 0xa7, 0xe3, 0x4a, 0x18, 0xf4, 0x0c, 0x2d, 0x57, 0xd8, 0x18, 0xd3, 0xc3, 0x90, + 0xc0, 0xdc, 0x4b, 0x96, 0xa4, 0x21, 0x38, 0x38, 0x57, 0xbc, 0x4c, 0x25, 0x28, 0xe1, 0xe5, 0xb0, + 0xb9, 0xee, 0xe5, 0x10, 0xc7, 0xef, 0xa1, 0x52, 0x34, 0xdc, 0xfc, 0x99, 0x73, 0x60, 0xd5, 0x7c, + 0xd2, 0xbe, 0x50, 0x14, 0x85, 0x8a, 0x82, 0x66, 0x57, 0xb5, 0x84, 0x7f, 0x80, 0xa6, 0xe3, 0xeb, + 0x5d, 0xd7, 0xdb, 0xa3, 0xe6, 0x82, 0x6e, 0x39, 0x7d, 0xb3, 0x2b, 0x2c, 0xbb, 0x43, 0x54, 0xb7, + 0xac, 0x08, 0xb0, 0x8d, 0x66, 0x03, 0x6d, 0x13, 0x64, 0x62, 0xb0, 0x7d, 0x6e, 0x04, 0x1f, 0x92, + 0x59, 0x3b, 0x3f, 0xe8, 0x57, 0x4c, 0x5d, 0x4d, 0xeb, 0x21, 0x65, 0x92, 0xcf, 0x9e, 0x1f, 0x1f, + 0x1d, 0x9b, 0xcb, 0xfa, 0xec, 0xe9, 0x67, 0xca, 0x32, 0x4d, 0xc5, 0x98, 0x3e, 0x7b, 0x09, 0xcc, + 0x67, 0xcf, 0x4f, 0x4e, 0xec, 0xcd, 0x15, 0x7d, 0xf6, 0x52, 0x67, 0xf9, 0x62, 0xf6, 0x86, 0xcd, + 0xf5, 0xd9, 0x1b, 0xe2, 0x8d, 0x02, 0xca, 0xc1, 0x53, 0xe8, 0xb0, 0xfa, 0xd3, 0x0c, 0x9a, 0x4b, + 0xdd, 0x7c, 0xe0, 0xff, 0x43, 0x53, 0x90, 0x17, 0x45, 0x89, 0x82, 0x07, 0xfd, 0xca, 0xac, 0xa7, + 0x27, 0x45, 0x90, 0xe3, 0x0d, 0x54, 0x88, 0x6f, 0xa0, 0xe4, 0x15, 0x04, 0x94, 0x27, 0x31, 0xa6, + 0x96, 0x27, 0x31, 0x86, 0xeb, 0x28, 0xdf, 0x13, 0x09, 0x4d, 0x16, 0x28, 0xb0, 0x96, 0x49, 0x48, + 0x4d, 0xbb, 0x12, 0x52, 0xb2, 0xe6, 0xd4, 0x31, 0x6e, 0xd9, 0x92, 0x0b, 0x98, 0xec, 0xb3, 0x5c, + 0xc0, 0x54, 0x3f, 0x42, 0x18, 0x02, 0xb8, 0xcb, 0x02, 0x62, 0xf5, 0xe2, 0x8c, 0xbb, 0x86, 0x32, + 0x49, 0xa1, 0x36, 0x3f, 0xe8, 0x57, 0xa6, 0x5d, 0xb5, 0x0e, 0xc9, 0xb8, 0x0e, 0x6e, 0x0c, 0x47, + 0x23, 0x72, 0xe8, 0x02, 0x74, 0xa8, 0xe6, 0xed, 0xa7, 0x0d, 0xb0, 0xfa, 0xcb, 0x0c, 0x9a, 0xd9, + 0x81, 0xea, 0xad, 0x29, 0x6a, 0xcd, 0x63, 0xf4, 0xfb, 0x12, 0xca, 0xde, 0xb1, 0x98, 0xbd, 0x0f, + 0xbd, 0x16, 0xc4, 0xd0, 0x00, 0x50, 0x87, 0x06, 0x00, 0xde, 0x44, 0x73, 0x7b, 0x01, 0xed, 0xb5, + 0x64, 0x77, 0xbc, 0xc6, 0x12, 0x81, 0x87, 0xa5, 0x8e, 0x8b, 0xa4, 0xa3, 0x5a, 0x91, 0x35, 0xa3, + 0x09, 0x86, 0x45, 0xe5, 0xd4, 0x53, 0x8b, 0xca, 0x37, 0xd0, 0x2c, 0x09, 0x02, 0x1a, 0x6c, 0xef, + 0x5d, 0x73, 0xc3, 0x90, 0x73, 0x36, 0x0b, 0x3e, 0xc2, 0x87, 0xa4, 0x4b, 0x14, 0xe5, 0x94, 0x4e, + 0xf5, 0x37, 0x06, 0x9a, 0x7e, 0x8f, 0xfb, 0x1f, 0xc7, 0x24, 
0xf1, 0xc0, 0x78, 0xaa, 0x07, 0x27, + 0xab, 0x9b, 0x2f, 0xa1, 0x3c, 0xc4, 0x29, 0x89, 0x8f, 0x48, 0x57, 0x01, 0xed, 0x69, 0x0a, 0x39, + 0x81, 0x5c, 0x7c, 0x07, 0x65, 0x81, 0x56, 0xb8, 0x88, 0xb2, 0x5b, 0xdc, 0xf7, 0xf9, 0x09, 0x5c, + 0x42, 0xf9, 0xad, 0xdb, 0xae, 0xcd, 0x88, 0x33, 0x6f, 0xe0, 0x3c, 0x9a, 0xbc, 0x7e, 0xfd, 0xda, + 0x7c, 0x06, 0x2f, 0xa1, 0xf9, 0x37, 0x88, 0xe5, 0x74, 0x5d, 0x8f, 0x6c, 0xdd, 0x15, 0x99, 0x69, + 0x7e, 0x12, 0x4f, 0xa3, 0x42, 0x93, 0x1c, 0x10, 0x68, 0x3c, 0xb5, 0xf1, 0x0f, 0x03, 0x65, 0xc5, + 0x06, 0x81, 0xa0, 0xb9, 0xb7, 0x08, 0x13, 0x7c, 0x00, 0x24, 0xc4, 0x38, 0x49, 0x82, 0x09, 0x45, + 0xca, 0x67, 0x86, 0x3c, 0xd3, 0x38, 0x5b, 0x7d, 0xe1, 0x27, 0x7f, 0x79, 0xf4, 0xab, 0xcc, 0x85, + 0xaa, 0x59, 0xbf, 0xfd, 0xff, 0xf5, 0x03, 0xda, 0xbe, 0x14, 0x12, 0x56, 0xbf, 0x07, 0x81, 0xf9, + 0xb8, 0x7e, 0xcf, 0x75, 0x3e, 0xbe, 0x62, 0x5c, 0x7c, 0xc5, 0xc0, 0x57, 0x50, 0x16, 0xc2, 0x8b, + 0x05, 0x61, 0xd5, 0x50, 0x3f, 0xd9, 0xf6, 0xe4, 0x27, 0x19, 0x03, 0x74, 0x73, 0x6f, 0xc3, 0x9f, + 0x3d, 0xe0, 0x95, 0xc7, 0xf6, 0x0b, 0x5b, 0x3c, 0x48, 0x65, 0xb1, 0x5a, 0x8b, 0x46, 0x9b, 0xfb, + 0xc4, 0x3e, 0x6c, 0x92, 0xd0, 0xa7, 0x5e, 0x48, 0x1a, 0x1f, 0x7c, 0xf9, 0xf7, 0xd5, 0x89, 0x1f, + 0x3f, 0x58, 0x35, 0xbe, 0x78, 0xb0, 0x6a, 0xdc, 0x7f, 0xb0, 0x6a, 0xfc, 0xed, 0xc1, 0xaa, 0xf1, + 0xe9, 0xc3, 0xd5, 0x89, 0xfb, 0x0f, 0x57, 0x27, 0xbe, 0x7c, 0xb8, 0x3a, 0xf1, 0xa3, 0x17, 0x95, + 0xbf, 0x93, 0xb0, 0x82, 0x9e, 0xe5, 0x58, 0x7e, 0x40, 0x79, 0xa0, 0xe4, 0xaf, 0xf8, 0xcf, 0x1c, + 0x7e, 0x97, 0x59, 0xba, 0x0a, 0xc0, 0x0d, 0x21, 0xae, 0x6d, 0xd3, 0xda, 0x55, 0xdf, 0x6d, 0xe7, + 0xc0, 0x97, 0xcb, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xf6, 0xfc, 0x80, 0xa6, 0xf4, 0x31, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -3306,7 +3020,7 @@ func (m *JobQueuedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobDuplicateFoundEvent) Marshal() (dAtA []byte, err error) { +func (m *JobLeasedEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3316,20 +3030,20 @@ func (m *JobDuplicateFoundEvent) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *JobDuplicateFoundEvent) MarshalTo(dAtA []byte) (int, error) { +func (m *JobLeasedEvent) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *JobDuplicateFoundEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *JobLeasedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.OriginalJobId) > 0 { - i -= len(m.OriginalJobId) - copy(dAtA[i:], m.OriginalJobId) - i = encodeVarintEvent(dAtA, i, uint64(len(m.OriginalJobId))) + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarintEvent(dAtA, i, uint64(len(m.ClusterId))) i-- dAtA[i] = 0x2a } @@ -3365,7 +3079,7 @@ func (m *JobDuplicateFoundEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *JobLeasedEvent) Marshal() (dAtA []byte, err error) { +func (m *JobLeaseReturnedEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3375,84 +3089,25 @@ func (m *JobLeasedEvent) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *JobLeasedEvent) MarshalTo(dAtA []byte) (int, error) { +func (m *JobLeaseReturnedEvent) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *JobLeasedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *JobLeaseReturnedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ClusterId) > 0 { - i -= len(m.ClusterId) - copy(dAtA[i:], m.ClusterId) - i = encodeVarintEvent(dAtA, i, uint64(len(m.ClusterId))) + if m.RunAttempted { i-- - dAtA[i] = 0x2a - } - n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err5 != nil { - return 0, err5 - } - i -= n5 - i = encodeVarintEvent(dAtA, i, uint64(n5)) - i-- - dAtA[i] = 0x22 - if len(m.Queue) > 0 { - i -= len(m.Queue) - copy(dAtA[i:], m.Queue) - i = encodeVarintEvent(dAtA, i, uint64(len(m.Queue))) + if m.RunAttempted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- - dAtA[i] = 0x1a - } - if len(m.JobSetId) > 0 { - i -= len(m.JobSetId) - copy(dAtA[i:], m.JobSetId) - i = encodeVarintEvent(dAtA, i, uint64(len(m.JobSetId))) - i-- - dAtA[i] = 0x12 - } - if len(m.JobId) > 0 { - i -= len(m.JobId) - copy(dAtA[i:], m.JobId) - i = encodeVarintEvent(dAtA, i, uint64(len(m.JobId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *JobLeaseReturnedEvent) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobLeaseReturnedEvent) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobLeaseReturnedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
m.RunAttempted { - i-- - if m.RunAttempted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 + dAtA[i] = 0x48 } if m.PodNumber != 0 { i = encodeVarintEvent(dAtA, i, uint64(m.PodNumber)) @@ -3480,12 +3135,12 @@ func (m *JobLeaseReturnedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err6 != nil { - return 0, err6 + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err5 != nil { + return 0, err5 } - i -= n6 - i = encodeVarintEvent(dAtA, i, uint64(n6)) + i -= n5 + i = encodeVarintEvent(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -3532,12 +3187,12 @@ func (m *JobLeaseExpiredEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err7 != nil { - return 0, err7 + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err6 != nil { + return 0, err6 } - i -= n7 - i = encodeVarintEvent(dAtA, i, uint64(n7)) + i -= n6 + i = encodeVarintEvent(dAtA, i, uint64(n6)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -3617,12 +3272,12 @@ func (m *JobPendingEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err8 != nil { - return 0, err8 + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err7 != nil { + return 0, err7 } - i -= n8 - i = encodeVarintEvent(dAtA, i, uint64(n8)) + i -= n7 + i = encodeVarintEvent(dAtA, i, uint64(n7)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -3709,12 +3364,12 @@ func (m *JobRunningEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err9 != nil { - return 0, err9 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err8 != nil { + return 0, err8 } - i -= n9 - i = encodeVarintEvent(dAtA, i, uint64(n9)) + i -= n8 + i = encodeVarintEvent(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -3818,12 +3473,12 @@ func (m *JobIngressInfoEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err10 != nil { - return 0, err10 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err9 != nil { + return 0, err9 } - i -= n10 - i = encodeVarintEvent(dAtA, i, uint64(n10)) + i -= n9 + i = encodeVarintEvent(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -3917,12 +3572,12 @@ func (m *JobUnableToScheduleEvent) MarshalToSizedBuffer(dAtA []byte) (int, error i-- dAtA[i] = 0x2a } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err11 != nil { - return 0, err11 + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err10 != nil { + return 0, err10 } - i -= n11 - i = encodeVarintEvent(dAtA, i, uint64(n11)) + i -= n10 + i = encodeVarintEvent(dAtA, i, uint64(n10)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4052,12 +3707,12 @@ func (m *JobFailedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err12 != nil { - return 0, err12 + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err11 != nil { + return 0, err11 } - i -= n12 - i = encodeVarintEvent(dAtA, i, uint64(n12)) + i -= n11 + i = encodeVarintEvent(dAtA, i, uint64(n11)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4111,12 +3766,12 @@ func (m *JobPreemptingEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err13 != nil { - return 0, err13 + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err12 != nil { + return 0, err12 } - i -= n13 - i = encodeVarintEvent(dAtA, i, uint64(n13)) + i -= n12 + i = encodeVarintEvent(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4191,12 +3846,12 @@ func (m *JobPreemptedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err14 != nil { - return 0, err14 + n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err13 != nil { + return 0, err13 } - i -= n14 - i = encodeVarintEvent(dAtA, i, uint64(n14)) + i -= n13 + i = encodeVarintEvent(dAtA, i, uint64(n13)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4223,36 +3878,6 @@ func (m *JobPreemptedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobFailedEventCompressed) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobFailedEventCompressed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobFailedEventCompressed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Event) > 0 { - i -= len(m.Event) - copy(dAtA[i:], m.Event) - i = encodeVarintEvent(dAtA, i, uint64(len(m.Event))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *JobSucceededEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4313,12 +3938,12 @@ func (m *JobSucceededEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err15 != 
nil { - return 0, err15 + n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err14 != nil { + return 0, err14 } - i -= n15 - i = encodeVarintEvent(dAtA, i, uint64(n15)) + i -= n14 + i = encodeVarintEvent(dAtA, i, uint64(n14)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4453,12 +4078,12 @@ func (m *JobUtilisationEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err18 != nil { - return 0, err18 + n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err17 != nil { + return 0, err17 } - i -= n18 - i = encodeVarintEvent(dAtA, i, uint64(n18)) + i -= n17 + i = encodeVarintEvent(dAtA, i, uint64(n17)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4518,12 +4143,12 @@ func (m *JobReprioritizingEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x29 } - n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err19 != nil { - return 0, err19 + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err18 != nil { + return 0, err18 } - i -= n19 - i = encodeVarintEvent(dAtA, i, uint64(n19)) + i -= n18 + i = encodeVarintEvent(dAtA, i, uint64(n18)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4583,12 +4208,12 @@ func (m *JobReprioritizedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x29 } - n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err20 != nil { - return 0, err20 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err19 != nil { + return 0, err19 } - i -= n20 - i = encodeVarintEvent(dAtA, i, uint64(n20)) + i -= n19 + i = encodeVarintEvent(dAtA, i, uint64(n19)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4649,12 +4274,12 @@ func (m *JobCancellingEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err21 != nil { - return 0, err21 + n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err20 != nil { + return 0, err20 } - i -= n21 - i = encodeVarintEvent(dAtA, i, uint64(n21)) + i -= n20 + i = encodeVarintEvent(dAtA, i, uint64(n20)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4715,12 +4340,12 @@ func (m *JobCancelledEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err22 != nil { - return 0, err22 + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err21 != nil { + return 0, err21 } - i -= n22 - i = encodeVarintEvent(dAtA, i, uint64(n22)) + i -= n21 + i = encodeVarintEvent(dAtA, i, uint64(n21)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { 
@@ -4807,12 +4432,12 @@ func (m *JobTerminatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - n23, err23 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err23 != nil { - return 0, err23 + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) + if err22 != nil { + return 0, err22 } - i -= n23 - i = encodeVarintEvent(dAtA, i, uint64(n23)) + i -= n22 + i = encodeVarintEvent(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x22 if len(m.Queue) > 0 { @@ -4839,7 +4464,7 @@ func (m *JobTerminatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobUpdatedEvent) Marshal() (dAtA []byte, err error) { +func (m *EventMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4849,113 +4474,44 @@ func (m *JobUpdatedEvent) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *JobUpdatedEvent) MarshalTo(dAtA []byte) (int, error) { +func (m *EventMessage) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *JobUpdatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EventMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Job.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Events != nil { + { + size := m.Events.Size() + i -= size + if _, err := m.Events.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } } - i -= size - i = encodeVarintEvent(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x32 - if len(m.Requestor) > 0 { - i -= len(m.Requestor) - copy(dAtA[i:], m.Requestor) - i = encodeVarintEvent(dAtA, i, uint64(len(m.Requestor))) - i-- - dAtA[i] = 0x2a - } - n25, err25 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Created, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Created):]) - if err25 != nil { - return 0, err25 - } - i -= n25 - i = encodeVarintEvent(dAtA, i, uint64(n25)) - i-- - dAtA[i] = 0x22 - if len(m.Queue) > 0 { - i -= len(m.Queue) - copy(dAtA[i:], m.Queue) - i = encodeVarintEvent(dAtA, i, uint64(len(m.Queue))) - i-- - dAtA[i] = 0x1a - } - if len(m.JobSetId) > 0 { - i -= len(m.JobSetId) - copy(dAtA[i:], m.JobSetId) - i = encodeVarintEvent(dAtA, i, uint64(len(m.JobSetId))) - i-- - dAtA[i] = 0x12 - } - if len(m.JobId) > 0 { - i -= len(m.JobId) - copy(dAtA[i:], m.JobId) - i = encodeVarintEvent(dAtA, i, uint64(len(m.JobId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EventMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Events != nil { - { - size := m.Events.Size() - i -= size - if _, err := m.Events.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *EventMessage_Submitted) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventMessage_Submitted) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Submitted != nil { - { - size, err := m.Submitted.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvent(dAtA, i, uint64(size)) - } + return len(dAtA) - i, nil +} + +func (m *EventMessage_Submitted) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventMessage_Submitted) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Submitted != nil { + { + size, err := m.Submitted.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } @@ -5255,29 +4811,6 @@ func (m *EventMessage_Utilisation) MarshalToSizedBuffer(dAtA []byte) (int, error } return len(dAtA) - i, nil } -func (m *EventMessage_DuplicateFound) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventMessage_DuplicateFound) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DuplicateFound != nil { - { - size, err := m.DuplicateFound.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - return len(dAtA) - i, nil -} func (m *EventMessage_IngressInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -5324,52 +4857,6 @@ func (m *EventMessage_Reprioritizing) MarshalToSizedBuffer(dAtA []byte) (int, er } return len(dAtA) - i, nil } -func (m *EventMessage_Updated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventMessage_Updated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Updated != nil { - { - size, err := m.Updated.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - return len(dAtA) - i, nil -} -func (m *EventMessage_FailedCompressed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventMessage_FailedCompressed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.FailedCompressed != nil { - { - size, err := m.FailedCompressed.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - return len(dAtA) - i, nil -} func (m *EventMessage_Preempted) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -5532,26 +5019,6 @@ func (m *JobSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ForceNew { - i-- - if m.ForceNew { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.ForceLegacy { - i-- - if m.ForceLegacy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } if m.ErrorIfMissing { i-- if m.ErrorIfMissing { @@ -5616,26 +5083,6 @@ func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ForceNew { - i-- - if m.ForceNew { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.ForceLegacy { - i-- - if m.ForceLegacy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - 
} - i-- - dAtA[i] = 0x20 - } if len(m.FromId) > 0 { i -= len(m.FromId) copy(dAtA[i:], m.FromId) @@ -5719,33 +5166,6 @@ func (m *JobQueuedEvent) Size() (n int) { return n } -func (m *JobDuplicateFoundEvent) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.JobId) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = len(m.JobSetId) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = len(m.Queue) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Created) - n += 1 + l + sovEvent(uint64(l)) - l = len(m.OriginalJobId) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - return n -} - func (m *JobLeasedEvent) Size() (n int) { if m == nil { return 0 @@ -6162,19 +5582,6 @@ func (m *JobPreemptedEvent) Size() (n int) { return n } -func (m *JobFailedEventCompressed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Event) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - return n -} - func (m *JobSucceededEvent) Size() (n int) { if m == nil { return 0 @@ -6453,35 +5860,6 @@ func (m *JobTerminatedEvent) Size() (n int) { return n } -func (m *JobUpdatedEvent) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.JobId) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = len(m.JobSetId) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = len(m.Queue) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Created) - n += 1 + l + sovEvent(uint64(l)) - l = len(m.Requestor) - if l > 0 { - n += 1 + l + sovEvent(uint64(l)) - } - l = m.Job.Size() - n += 1 + l + sovEvent(uint64(l)) - return n -} - func (m *EventMessage) Size() (n int) { if m == nil { return 0 @@ -6674,18 +6052,6 @@ func (m *EventMessage_Utilisation) Size() (n int) { } return n } -func (m *EventMessage_DuplicateFound) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DuplicateFound != nil { - l = m.DuplicateFound.Size() - n += 2 + l + sovEvent(uint64(l)) - } - return n -} func (m *EventMessage_IngressInfo) Size() (n int) { if m == nil { return 0 @@ -6710,30 +6076,6 @@ func (m *EventMessage_Reprioritizing) Size() (n int) { } return n } -func (m *EventMessage_Updated) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Updated != nil { - l = m.Updated.Size() - n += 2 + l + sovEvent(uint64(l)) - } - return n -} -func (m *EventMessage_FailedCompressed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FailedCompressed != nil { - l = m.FailedCompressed.Size() - n += 2 + l + sovEvent(uint64(l)) - } - return n -} func (m *EventMessage_Preempted) Size() (n int) { if m == nil { return 0 @@ -6826,12 +6168,6 @@ func (m *JobSetRequest) Size() (n int) { if m.ErrorIfMissing { n += 2 } - if m.ForceLegacy { - n += 2 - } - if m.ForceNew { - n += 2 - } return n } @@ -6853,12 +6189,6 @@ func (m *WatchRequest) Size() (n int) { if l > 0 { n += 1 + l + sovEvent(uint64(l)) } - if m.ForceLegacy { - n += 2 - } - if m.ForceNew { - n += 2 - } return n } @@ -6895,20 +6225,6 @@ func (this *JobQueuedEvent) String() string { }, "") return s } -func (this *JobDuplicateFoundEvent) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobDuplicateFoundEvent{`, - `JobId:` + fmt.Sprintf("%v", this.JobId) + `,`, - `JobSetId:` + fmt.Sprintf("%v", this.JobSetId) + `,`, - `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, - `Created:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `OriginalJobId:` + fmt.Sprintf("%v", this.OriginalJobId) + `,`, - `}`, - }, "") - return s -} func (this *JobLeasedEvent) String() string { if this == nil { return "nil" @@ -7110,16 +6426,6 @@ func (this *JobPreemptedEvent) String() string { }, "") return s } -func (this *JobFailedEventCompressed) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobFailedEventCompressed{`, - `Event:` + fmt.Sprintf("%v", this.Event) + `,`, - `}`, - }, "") - return s -} func (this *JobSucceededEvent) String() string { if this == nil { return "nil" @@ -7259,21 +6565,6 @@ func (this *JobTerminatedEvent) String() string { }, "") return s } -func (this *JobUpdatedEvent) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobUpdatedEvent{`, - `JobId:` + fmt.Sprintf("%v", this.JobId) + `,`, - `JobSetId:` + fmt.Sprintf("%v", this.JobSetId) + `,`, - `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, - `Created:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Requestor:` + fmt.Sprintf("%v", this.Requestor) + `,`, - `Job:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Job), "Job", "Job", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *EventMessage) String() string { if this == nil { return "nil" @@ -7434,16 +6725,6 @@ func (this *EventMessage_Utilisation) String() string { }, "") return s } -func (this *EventMessage_DuplicateFound) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventMessage_DuplicateFound{`, - `DuplicateFound:` + strings.Replace(fmt.Sprintf("%v", this.DuplicateFound), "JobDuplicateFoundEvent", "JobDuplicateFoundEvent", 1) + `,`, - `}`, - }, "") - return s -} func (this *EventMessage_IngressInfo) String() string { if this == nil { return "nil" @@ -7464,26 +6745,6 @@ func (this *EventMessage_Reprioritizing) String() string { }, "") return s } -func (this *EventMessage_Updated) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventMessage_Updated{`, - `Updated:` + strings.Replace(fmt.Sprintf("%v", this.Updated), "JobUpdatedEvent", "JobUpdatedEvent", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventMessage_FailedCompressed) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventMessage_FailedCompressed{`, - `FailedCompressed:` + strings.Replace(fmt.Sprintf("%v", this.FailedCompressed), "JobFailedEventCompressed", "JobFailedEventCompressed", 1) + `,`, - `}`, - }, "") - return s -} func (this *EventMessage_Preempted) String() string { if this == nil { return "nil" @@ -7539,8 +6800,6 @@ func (this *JobSetRequest) String() string { `FromMessageId:` + fmt.Sprintf("%v", this.FromMessageId) + `,`, `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, `ErrorIfMissing:` + fmt.Sprintf("%v", this.ErrorIfMissing) + `,`, - `ForceLegacy:` + fmt.Sprintf("%v", this.ForceLegacy) + `,`, - `ForceNew:` + fmt.Sprintf("%v", this.ForceNew) + `,`, `}`, }, "") return s @@ -7553,8 +6812,6 @@ func (this *WatchRequest) String() string { `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, `JobSetId:` + fmt.Sprintf("%v", this.JobSetId) + `,`, `FromId:` + fmt.Sprintf("%v", this.FromId) + `,`, - `ForceLegacy:` + fmt.Sprintf("%v", this.ForceLegacy) + `,`, - `ForceNew:` + fmt.Sprintf("%v", this.ForceNew) + `,`, `}`, }, "") return s @@ 
-7958,7 +7215,7 @@ func (m *JobQueuedEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobDuplicateFoundEvent) Unmarshal(dAtA []byte) error { +func (m *JobLeasedEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7981,10 +7238,10 @@ func (m *JobDuplicateFoundEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobDuplicateFoundEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobLeasedEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobDuplicateFoundEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobLeasedEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -8118,7 +7375,7 @@ func (m *JobDuplicateFoundEvent) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalJobId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8146,7 +7403,7 @@ func (m *JobDuplicateFoundEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OriginalJobId = string(dAtA[iNdEx:postIndex]) + m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8169,7 +7426,7 @@ func (m *JobDuplicateFoundEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobLeasedEvent) Unmarshal(dAtA []byte) error { +func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8192,10 +7449,10 @@ func (m *JobLeasedEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobLeasedEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobLeaseReturnedEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobLeasedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobLeaseReturnedEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -8359,6 +7616,109 @@ func (m *JobLeasedEvent) Unmarshal(dAtA []byte) error { } m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubernetesId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNumber", wireType) + } + m.PodNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PodNumber |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAttempted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAttempted = bool(v != 0) default: iNdEx = preIndex skippy, err := skipEvent(dAtA[iNdEx:]) @@ -8380,7 +7740,7 @@ func (m *JobLeasedEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { +func (m *JobLeaseExpiredEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8403,10 +7763,10 @@ func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobLeaseReturnedEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobLeaseExpiredEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobLeaseReturnedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobLeaseExpiredEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -8538,9 +7898,59 @@ func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobPendingEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobPendingEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobPendingEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8568,11 +7978,11 @@ func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterId = string(dAtA[iNdEx:postIndex]) + m.JobId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - 
case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JobSetId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8600,11 +8010,11 @@ func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.JobSetId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8632,13 +8042,13 @@ func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KubernetesId = string(dAtA[iNdEx:postIndex]) + m.Queue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodNumber", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) } - m.PodNumber = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvent @@ -8648,371 +8058,7 @@ func (m *JobLeaseReturnedEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PodNumber |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAttempted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAttempted = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipEvent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobLeaseExpiredEvent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobLeaseExpiredEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobLeaseExpiredEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.JobId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobSetId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JobSetId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Created, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobPendingEvent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobPendingEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobPendingEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JobId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobSetId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JobSetId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11453,107 +10499,23 @@ func (m *JobPreemptedEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreemptiveRunId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobFailedEventCompressed) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobFailedEventCompressed: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobFailedEventCompressed: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthEvent } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthEvent } if postIndex > l { return io.ErrUnexpectedEOF } - m.Event = append(m.Event[:0], dAtA[iNdEx:postIndex]...) - if m.Event == nil { - m.Event = []byte{} - } + m.PreemptiveRunId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -12486,270 +11448,48 @@ func (m *JobUtilisationEvent) Unmarshal(dAtA []byte) error { for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthEvent - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthEvent - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipEvent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.TotalCumulativeUsage[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobReprioritizingEvent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobReprioritizingEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobReprioritizingEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l 
{ - return io.ErrUnexpectedEOF - } - m.JobId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobSetId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JobSetId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Created, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPriority", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.NewPriority = float64(math.Float64frombits(v)) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requestor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthEvent + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthEvent + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + 
skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requestor = string(dAtA[iNdEx:postIndex]) + m.TotalCumulativeUsage[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -12772,7 +11512,7 @@ func (m *JobReprioritizingEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobReprioritizedEvent) Unmarshal(dAtA []byte) error { +func (m *JobReprioritizingEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12795,10 +11535,10 @@ func (m *JobReprioritizedEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobReprioritizedEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobReprioritizingEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobReprioritizedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobReprioritizingEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12994,7 +11734,7 @@ func (m *JobReprioritizedEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobCancellingEvent) Unmarshal(dAtA []byte) error { +func (m *JobReprioritizedEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13017,10 +11757,10 @@ func (m *JobCancellingEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobCancellingEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobReprioritizedEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobCancellingEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobReprioritizedEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13153,40 +11893,19 @@ func (m *JobCancellingEvent) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requestor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvent + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPriority", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Requestor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.NewPriority = float64(math.Float64frombits(v)) case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requestor", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13214,7 +11933,7 @@ func (m *JobCancellingEvent) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Requestor = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -13237,7 +11956,7 @@ func (m *JobCancellingEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobCancelledEvent) Unmarshal(dAtA []byte) error { +func (m *JobCancellingEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13260,10 +11979,10 @@ func (m *JobCancelledEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobCancelledEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobCancellingEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobCancelledEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobCancellingEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13480,7 +12199,7 @@ func (m *JobCancelledEvent) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { +func (m *JobCancelledEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13503,10 +12222,10 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobTerminatedEvent: wiretype end group for non-group") + return fmt.Errorf("proto: JobCancelledEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobTerminatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobCancelledEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13640,7 +12359,7 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requestor", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13668,11 +12387,11 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterId = string(dAtA[iNdEx:postIndex]) + m.Requestor = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13700,30 +12419,61 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KubernetesId = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodNumber", wireType) + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err } - m.PodNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PodNumber |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent } - case 8: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTerminatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTerminatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13751,11 +12501,11 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.JobId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JobSetId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13783,11 +12533,11 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PodName = string(dAtA[iNdEx:postIndex]) + m.JobSetId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13813,65 +12563,15 @@ func (m *JobTerminatedEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEvent } if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodNamespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobUpdatedEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobUpdatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + return io.ErrUnexpectedEOF + } + m.Queue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvent @@ -13881,27 +12581,28 @@ func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthEvent } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthEvent } if postIndex > l { return io.ErrUnexpectedEOF } - m.JobId = string(dAtA[iNdEx:postIndex]) + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Created, dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobSetId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13929,11 +12630,11 @@ func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.JobSetId = string(dAtA[iNdEx:postIndex]) + m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13961,13 +12662,32 @@ func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Queue = string(dAtA[iNdEx:postIndex]) + m.KubernetesId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodNumber", wireType) + } + m.PodNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PodNumber |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvent @@ -13977,28 +12697,27 @@ func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthEvent } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthEvent } if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Created, dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requestor", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14026,13 +12745,13 @@ func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Requestor = 
string(dAtA[iNdEx:postIndex]) + m.PodName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodNamespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvent @@ -14042,24 +12761,23 @@ func (m *JobUpdatedEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthEvent } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthEvent } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Job.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PodNamespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -14636,41 +13354,6 @@ func (m *EventMessage) Unmarshal(dAtA []byte) error { } m.Events = &EventMessage_Utilisation{v} iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DuplicateFound", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &JobDuplicateFoundEvent{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Events = &EventMessage_DuplicateFound{v} - iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field IngressInfo", wireType) @@ -14741,76 +13424,6 @@ func (m *EventMessage) Unmarshal(dAtA []byte) error { } m.Events = &EventMessage_Reprioritizing{v} iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Updated", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &JobUpdatedEvent{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Events = &EventMessage_Updated{v} - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailedCompressed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &JobFailedEventCompressed{} - if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Events = &EventMessage_FailedCompressed{v} - iNdEx = postIndex case 21: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Preempted", wireType) @@ -15369,46 +13982,6 @@ func (m *JobSetRequest) Unmarshal(dAtA []byte) error { } } m.ErrorIfMissing = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceLegacy", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ForceLegacy = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceNew", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ForceNew = bool(v != 0) default: iNdEx = preIndex skippy, err := skipEvent(dAtA[iNdEx:]) @@ -15555,46 +14128,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } m.FromId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceLegacy", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ForceLegacy = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceNew", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ForceNew = bool(v != 0) default: iNdEx = preIndex skippy, err := skipEvent(dAtA[iNdEx:]) diff --git a/pkg/api/event.proto b/pkg/api/event.proto index 97a5f771b0f..908c7fa26cb 100644 --- a/pkg/api/event.proto +++ b/pkg/api/event.proto @@ -30,14 +30,6 @@ message JobQueuedEvent { google.protobuf.Timestamp created = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; } -message JobDuplicateFoundEvent { - string job_id = 1; - string job_set_id = 2; - string queue = 3; - google.protobuf.Timestamp created = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - string original_job_id = 5; -} - message JobLeasedEvent { string job_id = 1; string job_set_id = 2; @@ -154,11 +146,6 @@ message JobPreemptedEvent { string preemptive_run_id = 8; } -// Only used internally by Armada -message JobFailedEventCompressed { - bytes event = 1; -} - message JobSucceededEvent { string job_id = 1; string job_set_id = 2; @@ -236,20 +223,11 @@ message JobTerminatedEvent { string reason = 8; } -message JobUpdatedEvent { - string job_id = 1; - string job_set_id = 2; - string queue = 3; - google.protobuf.Timestamp created = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - string requestor = 5; - Job job = 6 [(gogoproto.nullable) = false]; -} message EventMessage { oneof events { JobSubmittedEvent submitted = 1; JobQueuedEvent queued = 2; - JobDuplicateFoundEvent duplicate_found = 16; JobLeasedEvent leased = 3; JobLeaseReturnedEvent 
lease_returned = 4; JobLeaseExpiredEvent lease_expired = 5; @@ -265,8 +243,6 @@ message EventMessage { JobUtilisationEvent utilisation = 15; JobIngressInfoEvent ingress_info = 17; JobReprioritizingEvent reprioritizing = 18; - JobUpdatedEvent updated = 19; - JobFailedEventCompressed failedCompressed = 20; // This event is for internal armada use only JobPreemptedEvent preempted = 21; JobPreemptingEvent preempting = 22; } @@ -277,6 +253,7 @@ enum Cause { Evicted = 1; OOM = 2; DeadlineExceeded = 3; + Rejected = 4; } message ContainerStatus { @@ -300,16 +277,12 @@ message JobSetRequest { string from_message_id = 3; string queue = 4; bool errorIfMissing = 5; - bool force_legacy = 6; // This field is for test purposes only - bool force_new = 7; // This field is for test purposes only } message WatchRequest { string queue = 1; string job_set_id = 2; string from_id = 3; - bool force_legacy = 4; // This field is for test purposes only - bool force_new = 5; // This field is for test purposes only } service Event { diff --git a/pkg/api/event_util.go b/pkg/api/event_util.go index cfce5205680..fabf707ed99 100644 --- a/pkg/api/event_util.go +++ b/pkg/api/event_util.go @@ -39,8 +39,6 @@ func UnwrapEvent(message *EventMessage) (Event, error) { return event.Submitted, nil case *EventMessage_Queued: return event.Queued, nil - case *EventMessage_DuplicateFound: - return event.DuplicateFound, nil case *EventMessage_Leased: return event.Leased, nil case *EventMessage_LeaseReturned: @@ -71,8 +69,6 @@ func UnwrapEvent(message *EventMessage) (Event, error) { return event.Utilisation, nil case *EventMessage_IngressInfo: return event.IngressInfo, nil - case *EventMessage_Updated: - return event.Updated, nil case *EventMessage_Preempting: return event.Preempting, nil case *EventMessage_Preempted: @@ -80,135 +76,3 @@ func UnwrapEvent(message *EventMessage) (Event, error) { } return nil, errors.Errorf("unknown event type: %s", reflect.TypeOf(message.Events)) } - -func Wrap(event Event) (*EventMessage, error) { - switch typed := event.(type) { - case *JobSubmittedEvent: - return &EventMessage{ - Events: &EventMessage_Submitted{ - Submitted: typed, - }, - }, nil - case *JobQueuedEvent: - return &EventMessage{ - Events: &EventMessage_Queued{ - Queued: typed, - }, - }, nil - case *JobDuplicateFoundEvent: - return &EventMessage{ - Events: &EventMessage_DuplicateFound{ - DuplicateFound: typed, - }, - }, nil - case *JobLeasedEvent: - return &EventMessage{ - Events: &EventMessage_Leased{ - Leased: typed, - }, - }, nil - case *JobLeaseReturnedEvent: - return &EventMessage{ - Events: &EventMessage_LeaseReturned{ - LeaseReturned: typed, - }, - }, nil - case *JobLeaseExpiredEvent: - return &EventMessage{ - Events: &EventMessage_LeaseExpired{ - LeaseExpired: typed, - }, - }, nil - case *JobPendingEvent: - return &EventMessage{ - Events: &EventMessage_Pending{ - Pending: typed, - }, - }, nil - case *JobRunningEvent: - return &EventMessage{ - Events: &EventMessage_Running{ - Running: typed, - }, - }, nil - case *JobUnableToScheduleEvent: - return &EventMessage{ - Events: &EventMessage_UnableToSchedule{ - UnableToSchedule: typed, - }, - }, nil - case *JobFailedEvent: - return &EventMessage{ - Events: &EventMessage_Failed{ - Failed: typed, - }, - }, nil - case *JobSucceededEvent: - return &EventMessage{ - Events: &EventMessage_Succeeded{ - Succeeded: typed, - }, - }, nil - case *JobReprioritizingEvent: - return &EventMessage{ - Events: &EventMessage_Reprioritizing{ - Reprioritizing: typed, - }, - }, nil - case *JobReprioritizedEvent: 
- return &EventMessage{ - Events: &EventMessage_Reprioritized{ - Reprioritized: typed, - }, - }, nil - case *JobCancellingEvent: - return &EventMessage{ - Events: &EventMessage_Cancelling{ - Cancelling: typed, - }, - }, nil - case *JobCancelledEvent: - return &EventMessage{ - Events: &EventMessage_Cancelled{ - Cancelled: typed, - }, - }, nil - case *JobTerminatedEvent: - return &EventMessage{ - Events: &EventMessage_Terminated{ - Terminated: typed, - }, - }, nil - case *JobUtilisationEvent: - return &EventMessage{ - Events: &EventMessage_Utilisation{ - Utilisation: typed, - }, - }, nil - case *JobIngressInfoEvent: - return &EventMessage{ - Events: &EventMessage_IngressInfo{ - IngressInfo: typed, - }, - }, nil - case *JobUpdatedEvent: - return &EventMessage{ - Events: &EventMessage_Updated{ - Updated: typed, - }, - }, nil - case *JobPreemptingEvent: - return &EventMessage{ - Events: &EventMessage_Preempting{ - Preempting: typed, - }, - }, nil - case *JobPreemptedEvent: - return &EventMessage{ - Events: &EventMessage_Preempted{ - Preempted: typed, - }, - }, nil - } - return nil, errors.Errorf("unknown event type: %s", reflect.TypeOf(event)) -} diff --git a/pkg/api/submit.pb.go b/pkg/api/submit.pb.go index a03e52055e2..1840186f78e 100644 --- a/pkg/api/submit.pb.go +++ b/pkg/api/submit.pb.go @@ -151,8 +151,6 @@ type JobSubmitRequestItem struct { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. Scheduler string `protobuf:"bytes,11,opt,name=scheduler,proto3" json:"scheduler,omitempty"` - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - QueueTtlSeconds int64 `protobuf:"varint,12,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` } func (m *JobSubmitRequestItem) Reset() { *m = JobSubmitRequestItem{} } @@ -266,13 +264,6 @@ func (m *JobSubmitRequestItem) GetScheduler() string { return "" } -func (m *JobSubmitRequestItem) GetQueueTtlSeconds() int64 { - if m != nil { - return m.QueueTtlSeconds - } - return 0 -} - type IngressConfig struct { Type IngressType `protobuf:"varint,1,opt,name=type,proto3,enum=api.IngressType" json:"type,omitempty"` // Deprecated: Do not use. 
Ports []uint32 `protobuf:"varint,2,rep,packed,name=ports,proto3" json:"ports,omitempty"` @@ -1157,10 +1148,11 @@ func (m *JobSubmitResponse) GetJobResponseItems() []*JobSubmitResponseItem { // swagger:model type Queue struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - PriorityFactor float64 `protobuf:"fixed64,2,opt,name=priority_factor,json=priorityFactor,proto3" json:"priorityFactor,omitempty"` - UserOwners []string `protobuf:"bytes,3,rep,name=user_owners,json=userOwners,proto3" json:"userOwners,omitempty"` - GroupOwners []string `protobuf:"bytes,4,rep,name=group_owners,json=groupOwners,proto3" json:"groupOwners,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + PriorityFactor float64 `protobuf:"fixed64,2,opt,name=priority_factor,json=priorityFactor,proto3" json:"priorityFactor,omitempty"` + UserOwners []string `protobuf:"bytes,3,rep,name=user_owners,json=userOwners,proto3" json:"userOwners,omitempty"` + GroupOwners []string `protobuf:"bytes,4,rep,name=group_owners,json=groupOwners,proto3" json:"groupOwners,omitempty"` + // These are ignored and should be removed ResourceLimits map[string]float64 `protobuf:"bytes,5,rep,name=resource_limits,json=resourceLimits,proto3" json:"resourceLimits,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` // Deprecated: Do not use. // Map from priority class name to resource limit overrides for this queue and priority class. // If provided for a priority class, global limits for that priority class do not apply to this queue. @@ -2089,190 +2081,193 @@ func init() { func init() { proto.RegisterFile("pkg/api/submit.proto", fileDescriptor_e998bacb27df16c1) } var fileDescriptor_e998bacb27df16c1 = []byte{ - // 2927 bytes of a gzipped FileDescriptorProto + // 2972 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1b, 0xd7, - 0xd5, 0xd7, 0x88, 0x12, 0x25, 0x9e, 0xd1, 0x83, 0xba, 0x7a, 0x8d, 0x68, 0x87, 0x94, 0x27, 0x89, - 0x3f, 0x59, 0x48, 0xa8, 0x58, 0xf9, 0x82, 0xcf, 0xf6, 0x97, 0xc2, 0x10, 0x25, 0xda, 0xa6, 0x63, - 0xd3, 0x0a, 0x65, 0x25, 0x4d, 0x51, 0x94, 0x19, 0x72, 0xae, 0xa8, 0x91, 0xc8, 0x19, 0x66, 0x66, - 0x28, 0x57, 0x2d, 0x02, 0x14, 0x45, 0xd1, 0x2e, 0xba, 0x49, 0xd1, 0x5d, 0x0b, 0x64, 0xd3, 0x5d, - 0xba, 0xe8, 0xa6, 0xe8, 0xdf, 0x90, 0x65, 0x80, 0xa2, 0x40, 0xba, 0x61, 0x5a, 0xa7, 0x0f, 0x80, - 0xbb, 0xee, 0xbb, 0x28, 0xee, 0xb9, 0x33, 0x9c, 0x3b, 0x7c, 0x88, 0x94, 0x63, 0x25, 0x9b, 0xee, - 0x74, 0xcf, 0x3d, 0xe7, 0x77, 0x1e, 0x73, 0xee, 0xb9, 0xe7, 0x5c, 0x0a, 0x16, 0xea, 0xc7, 0x95, - 0x0d, 0xad, 0x6e, 0x6c, 0x38, 0x8d, 0x52, 0xcd, 0x70, 0xd3, 0x75, 0xdb, 0x72, 0x2d, 0x12, 0xd1, - 0xea, 0x46, 0xe2, 0x52, 0xc5, 0xb2, 0x2a, 0x55, 0xba, 0x81, 0xa4, 0x52, 0xe3, 0x60, 0x83, 0xd6, - 0xea, 0xee, 0x29, 0xe7, 0x48, 0xa4, 0x3a, 0x37, 0x5d, 0xa3, 0x46, 0x1d, 0x57, 0xab, 0xd5, 0x3d, - 0x06, 0xf5, 0xf8, 0x86, 0x93, 0x36, 0x2c, 0xc4, 0x2e, 0x5b, 0x36, 0xdd, 0x38, 0xb9, 0xbe, 0x51, - 0xa1, 0x26, 0xb5, 0x35, 0x97, 0xea, 0x1e, 0xcf, 0x9a, 0xc0, 0x63, 0x52, 0xf7, 0x89, 0x65, 0x1f, - 0x1b, 0x66, 0xa5, 0x17, 0xe7, 0x65, 0x4f, 0x1d, 0xe3, 0xd4, 0x4c, 0xd3, 0x72, 0x35, 0xd7, 0xb0, - 0x4c, 0xc7, 0xdb, 0x7d, 0xb5, 0x62, 0xb8, 0x87, 0x8d, 0x52, 0xba, 0x6c, 0xd5, 0x36, 0x2a, 0x56, - 0xc5, 0x0a, 0xac, 0x62, 0x2b, 0x5c, 0xe0, 0x5f, 0x1e, 0x7b, 0xdb, 0xe7, 0x43, 0xaa, 0x55, 0xdd, - 0x43, 0x4e, 0x55, 0x5b, 0x31, 0x58, 0xb8, 0x6f, 0x95, 0xf6, 0x30, 0x0e, 0x05, 0xfa, 0x41, 0x83, 
- 0x3a, 0x6e, 0xce, 0xa5, 0x35, 0xb2, 0x09, 0x93, 0x75, 0xdb, 0xb0, 0x6c, 0xc3, 0x3d, 0x55, 0xa4, - 0x55, 0x69, 0x4d, 0xca, 0x2c, 0xb5, 0x9a, 0x29, 0xe2, 0xd3, 0x5e, 0xb1, 0x6a, 0x86, 0x8b, 0xa1, - 0x29, 0xb4, 0xf9, 0xc8, 0x1b, 0x10, 0x33, 0xb5, 0x1a, 0x75, 0xea, 0x5a, 0x99, 0x2a, 0x91, 0x55, - 0x69, 0x2d, 0x96, 0x59, 0x6e, 0x35, 0x53, 0xf3, 0x6d, 0xa2, 0x20, 0x15, 0x70, 0x92, 0xd7, 0x21, - 0x56, 0xae, 0x1a, 0xd4, 0x74, 0x8b, 0x86, 0xae, 0x4c, 0xa2, 0x18, 0xea, 0xe2, 0xc4, 0x9c, 0x2e, - 0xea, 0xf2, 0x69, 0x64, 0x0f, 0xa2, 0x55, 0xad, 0x44, 0xab, 0x8e, 0x32, 0xb6, 0x1a, 0x59, 0x93, - 0x37, 0x5f, 0x4e, 0x6b, 0x75, 0x23, 0xdd, 0xcb, 0x95, 0xf4, 0x03, 0xe4, 0xcb, 0x9a, 0xae, 0x7d, - 0x9a, 0x59, 0x68, 0x35, 0x53, 0x71, 0x2e, 0x28, 0xc0, 0x7a, 0x50, 0xa4, 0x02, 0xb2, 0x10, 0x67, - 0x65, 0x1c, 0x91, 0xd7, 0xfb, 0x23, 0x6f, 0x05, 0xcc, 0x1c, 0x7e, 0xa5, 0xd5, 0x4c, 0x2d, 0x0a, - 0x10, 0x82, 0x0e, 0x11, 0x99, 0xfc, 0x4c, 0x82, 0x05, 0x9b, 0x7e, 0xd0, 0x30, 0x6c, 0xaa, 0x17, - 0x4d, 0x4b, 0xa7, 0x45, 0xcf, 0x99, 0x28, 0xaa, 0xbc, 0xde, 0x5f, 0x65, 0xc1, 0x93, 0xca, 0x5b, - 0x3a, 0x15, 0x1d, 0x53, 0x5b, 0xcd, 0xd4, 0x65, 0xbb, 0x6b, 0x33, 0x30, 0x40, 0x91, 0x0a, 0xa4, - 0x7b, 0x9f, 0x3c, 0x82, 0xc9, 0xba, 0xa5, 0x17, 0x9d, 0x3a, 0x2d, 0x2b, 0xa3, 0xab, 0xd2, 0x9a, - 0xbc, 0x79, 0x29, 0xcd, 0x13, 0x14, 0x6d, 0x60, 0x49, 0x9c, 0x3e, 0xb9, 0x9e, 0xde, 0xb5, 0xf4, - 0xbd, 0x3a, 0x2d, 0xe3, 0xf7, 0x9c, 0xab, 0xf3, 0x45, 0x08, 0x7b, 0xc2, 0x23, 0x92, 0x5d, 0x88, - 0xf9, 0x80, 0x8e, 0x32, 0x81, 0xee, 0x9c, 0x89, 0xc8, 0xd3, 0x8a, 0x2f, 0x9c, 0x50, 0x5a, 0x79, - 0x34, 0xb2, 0x0d, 0x13, 0x86, 0x59, 0xb1, 0xa9, 0xe3, 0x28, 0x31, 0xc4, 0x23, 0x08, 0x94, 0xe3, - 0xb4, 0x6d, 0xcb, 0x3c, 0x30, 0x2a, 0x99, 0x45, 0x66, 0x98, 0xc7, 0x26, 0xa0, 0xf8, 0x92, 0xe4, - 0x0e, 0x4c, 0x3a, 0xd4, 0x3e, 0x31, 0xca, 0xd4, 0x51, 0x40, 0x40, 0xd9, 0xe3, 0x44, 0x0f, 0x05, - 0x8d, 0xf1, 0xf9, 0x44, 0x63, 0x7c, 0x1a, 0xcb, 0x71, 0xa7, 0x7c, 0x48, 0xf5, 0x46, 0x95, 0xda, - 0x8a, 0x1c, 0xe4, 0x78, 0x9b, 0x28, 0xe6, 0x78, 0x9b, 0x48, 0x72, 0x30, 0xf7, 0x41, 0x83, 0x36, - 0x68, 0xd1, 0x75, 0xab, 0x45, 0x87, 0x96, 0x2d, 0x53, 0x77, 0x94, 0xa9, 0x55, 0x69, 0x2d, 0x92, - 0x79, 0xa1, 0xd5, 0x4c, 0xad, 0xe0, 0xe6, 0x63, 0xb7, 0xba, 0xc7, 0xb7, 0x04, 0x90, 0xd9, 0x8e, - 0xad, 0x84, 0x06, 0xb2, 0xf0, 0xe1, 0xc9, 0x8b, 0x10, 0x39, 0xa6, 0xfc, 0x8c, 0xc6, 0x32, 0x73, - 0xad, 0x66, 0x6a, 0xfa, 0x98, 0x8a, 0xc7, 0x93, 0xed, 0x92, 0x6b, 0x30, 0x7e, 0xa2, 0x55, 0x1b, - 0x14, 0x3f, 0x71, 0x2c, 0x33, 0xdf, 0x6a, 0xa6, 0x66, 0x91, 0x20, 0x30, 0x72, 0x8e, 0x5b, 0xa3, - 0x37, 0xa4, 0xc4, 0x01, 0xc4, 0x3b, 0x53, 0xfb, 0x42, 0xf4, 0xd4, 0x60, 0xb9, 0x4f, 0x3e, 0x5f, - 0x84, 0x3a, 0xf5, 0x5f, 0x11, 0x98, 0x0e, 0x65, 0x0d, 0xb9, 0x05, 0x63, 0xee, 0x69, 0x9d, 0xa2, - 0x9a, 0x99, 0xcd, 0xb8, 0x98, 0x57, 0x8f, 0x4f, 0xeb, 0x14, 0xcb, 0xc5, 0x0c, 0xe3, 0x08, 0xe5, - 0x3a, 0xca, 0x30, 0xe5, 0x75, 0xcb, 0x76, 0x1d, 0x65, 0x74, 0x35, 0xb2, 0x36, 0xcd, 0x95, 0x23, - 0x41, 0x54, 0x8e, 0x04, 0xf2, 0x7e, 0xb8, 0xae, 0x44, 0x30, 0xff, 0x5e, 0xec, 0xce, 0xe2, 0x67, - 0x2f, 0x28, 0x37, 0x41, 0x76, 0xab, 0x4e, 0x91, 0x9a, 0x5a, 0xa9, 0x4a, 0x75, 0x65, 0x6c, 0x55, - 0x5a, 0x9b, 0xcc, 0x28, 0xad, 0x66, 0x6a, 0xc1, 0x65, 0x11, 0x45, 0xaa, 0x20, 0x0b, 0x01, 0x15, - 0xcb, 0x2f, 0xb5, 0xdd, 0x22, 0x2b, 0xc8, 0xca, 0xb8, 0x50, 0x7e, 0xa9, 0xed, 0xe6, 0xb5, 0x1a, - 0x0d, 0x95, 0x5f, 0x8f, 0x46, 0x6e, 0xc3, 0x74, 0xc3, 0xa1, 0xc5, 0x72, 0xb5, 0xe1, 0xb8, 0xd4, - 0xce, 0xed, 0x2a, 0x51, 0xd4, 0x98, 0x68, 0x35, 0x53, 0x4b, 0x0d, 0x87, 0x6e, 0xfb, 0x74, 0x41, - 0x78, 0x4a, 0xa4, 0x7f, 
0x5d, 0x29, 0xa6, 0xba, 0x30, 0x1d, 0x3a, 0xe2, 0xe4, 0x46, 0x8f, 0x4f, - 0xee, 0x71, 0xe0, 0x27, 0x27, 0xdd, 0x9f, 0xfc, 0xdc, 0x1f, 0x5c, 0xfd, 0xb3, 0x04, 0xf1, 0xce, - 0xf2, 0xcd, 0xe4, 0xf1, 0x2c, 0x7b, 0x0e, 0xa2, 0x3c, 0x12, 0x44, 0x79, 0x24, 0x90, 0xff, 0x05, - 0x38, 0xb2, 0x4a, 0x45, 0x87, 0xe2, 0x9d, 0x38, 0x1a, 0x7c, 0x94, 0x23, 0xab, 0xb4, 0x47, 0x3b, - 0xee, 0x44, 0x9f, 0x46, 0x74, 0x98, 0x63, 0x52, 0x36, 0xd7, 0x57, 0x64, 0x0c, 0x7e, 0xb2, 0xad, - 0xf4, 0xbd, 0x51, 0x78, 0xfd, 0x39, 0xb2, 0x4a, 0x02, 0x2d, 0x54, 0x7f, 0x3a, 0xb6, 0xd4, 0xdf, - 0x48, 0x30, 0x77, 0xdf, 0x2a, 0xed, 0xda, 0x94, 0x31, 0x7c, 0x6d, 0xce, 0xbd, 0x0a, 0x13, 0x4c, - 0xca, 0xd0, 0xb9, 0x4b, 0x31, 0x7e, 0x95, 0x1f, 0x59, 0xa5, 0x5c, 0xa8, 0x5c, 0x46, 0x39, 0x45, - 0xfd, 0x37, 0xff, 0x02, 0xdb, 0x9a, 0x59, 0xa6, 0x55, 0xdf, 0xc8, 0x75, 0x88, 0x72, 0x0c, 0xd1, - 0x4a, 0x14, 0x10, 0xad, 0x44, 0xc2, 0x33, 0x5a, 0xd9, 0x0e, 0x43, 0x64, 0x60, 0x18, 0x04, 0x87, - 0xc6, 0x06, 0x3b, 0x44, 0x5e, 0x81, 0xa8, 0x4d, 0x35, 0xc7, 0x32, 0xbd, 0x33, 0x8a, 0xdc, 0x9c, - 0x22, 0x72, 0x73, 0x8a, 0xfa, 0x77, 0x09, 0xe6, 0xef, 0xa3, 0x51, 0xe1, 0x08, 0x84, 0xbd, 0x92, - 0xce, 0xeb, 0xd5, 0xe8, 0x40, 0xaf, 0x6e, 0x43, 0xf4, 0xc0, 0xa8, 0xba, 0xd4, 0xc6, 0x08, 0xc8, - 0x9b, 0x73, 0xed, 0xc4, 0xa3, 0xee, 0x1d, 0xdc, 0xe0, 0x96, 0x73, 0x26, 0xd1, 0x72, 0x4e, 0x11, - 0xfc, 0x1c, 0x1b, 0xc2, 0xcf, 0xb7, 0x60, 0x4a, 0xc4, 0x26, 0xff, 0x0f, 0x51, 0xc7, 0xd5, 0x5c, - 0xea, 0x28, 0xd2, 0x6a, 0x64, 0x6d, 0x66, 0x73, 0xba, 0xad, 0x9e, 0x51, 0x39, 0x18, 0x67, 0x10, - 0xc1, 0x38, 0x45, 0xfd, 0x62, 0x16, 0x22, 0xf7, 0xad, 0x12, 0x59, 0x85, 0xd1, 0x76, 0x70, 0xe2, - 0xad, 0x66, 0x6a, 0xca, 0x10, 0xc3, 0x32, 0x6a, 0xe8, 0xe1, 0x96, 0x75, 0x7a, 0xc8, 0x96, 0xf5, - 0xc2, 0x33, 0x2a, 0xd4, 0x7f, 0x4f, 0x0c, 0xdd, 0x7f, 0x67, 0xda, 0xad, 0x34, 0x6f, 0xaf, 0x16, - 0xfc, 0x98, 0x9d, 0xa3, 0x73, 0x7e, 0x27, 0x7c, 0xc3, 0x41, 0xb8, 0xe8, 0x3c, 0xfb, 0xbd, 0x76, - 0xd2, 0xa7, 0x4f, 0x96, 0x51, 0xc1, 0x6a, 0x5b, 0xc1, 0xf3, 0x6e, 0x8b, 0xaf, 0xc1, 0xb8, 0xf5, - 0xc4, 0xa4, 0xb6, 0x37, 0x8f, 0x60, 0xd4, 0x91, 0x20, 0x46, 0x1d, 0x09, 0x84, 0xc2, 0x25, 0xde, - 0xda, 0xe1, 0xd2, 0x39, 0x34, 0xea, 0xc5, 0x86, 0x43, 0xed, 0x62, 0xc5, 0xb6, 0x1a, 0x75, 0x47, - 0x99, 0xc5, 0xb3, 0x7d, 0xb5, 0xd5, 0x4c, 0xa9, 0xc8, 0xf6, 0xc8, 0xe7, 0xda, 0x77, 0xa8, 0x7d, - 0x17, 0x79, 0x04, 0x4c, 0xa5, 0x1f, 0x0f, 0xf9, 0x89, 0x04, 0x57, 0xcb, 0x56, 0xad, 0xce, 0xba, - 0x05, 0xaa, 0x17, 0xcf, 0x52, 0x39, 0xbf, 0x2a, 0xad, 0x4d, 0x65, 0x5e, 0x6b, 0x35, 0x53, 0xaf, - 0x04, 0x12, 0x6f, 0x0f, 0x56, 0xae, 0x0e, 0xe6, 0x0e, 0xcd, 0x85, 0x63, 0x43, 0xce, 0x85, 0xe2, - 0x8c, 0x31, 0xfe, 0xdc, 0x67, 0x8c, 0xa9, 0xe7, 0x31, 0x63, 0xfc, 0x4a, 0x82, 0x55, 0xaf, 0x5b, - 0x37, 0xcc, 0x4a, 0xd1, 0xa6, 0x8e, 0xd5, 0xb0, 0xcb, 0xb4, 0xe8, 0xa5, 0x46, 0x8d, 0x9a, 0xae, - 0xa3, 0x2c, 0xa2, 0xed, 0x6b, 0xbd, 0x34, 0x15, 0x3c, 0x81, 0x82, 0xc0, 0x9f, 0xb9, 0xfa, 0x69, - 0x33, 0x35, 0xd2, 0x6a, 0xa6, 0x92, 0x01, 0x72, 0x2f, 0xbe, 0xc2, 0x80, 0x7d, 0x92, 0x83, 0x89, - 0xb2, 0x4d, 0x35, 0x97, 0xea, 0xd8, 0x66, 0xc9, 0x9b, 0x89, 0x34, 0x7f, 0x19, 0x48, 0xfb, 0x23, - 0x7f, 0xfa, 0xb1, 0xff, 0x10, 0x91, 0x99, 0xf7, 0x94, 0xfa, 0x22, 0x1f, 0x7d, 0x91, 0x92, 0x0a, - 0xfe, 0x42, 0x9c, 0xa5, 0x66, 0x9e, 0xcb, 0x2c, 0x15, 0xff, 0x0a, 0xb3, 0xd4, 0x77, 0x41, 0x3e, - 0xbe, 0xe1, 0x14, 0x7d, 0x83, 0xe6, 0x10, 0xea, 0x8a, 0x18, 0xde, 0xe0, 0x7d, 0x84, 0x05, 0xd9, - 0xb3, 0x92, 0xf7, 0xb5, 0xc7, 0x37, 0x9c, 0x5c, 0x97, 0x89, 0x10, 0x50, 0x59, 0x49, 0x62, 0xe8, - 0x9e, 0x36, 0x85, 0xf4, 0x4f, 0x13, 0xcf, 0xee, 
0x36, 0xae, 0xb7, 0xee, 0xc0, 0xf5, 0xa8, 0xe1, - 0x09, 0x70, 0xe1, 0xab, 0x4d, 0x80, 0x4b, 0xff, 0x9d, 0x00, 0xbf, 0xd6, 0x09, 0xf0, 0x1f, 0x12, - 0x2c, 0xdd, 0x67, 0xfd, 0xac, 0x57, 0x9b, 0x8c, 0x1f, 0x50, 0xbf, 0x33, 0x12, 0xda, 0x31, 0x69, - 0x88, 0x76, 0xec, 0xc2, 0x2f, 0xf3, 0x37, 0x61, 0xca, 0xa4, 0x4f, 0x8a, 0x1d, 0xc5, 0x16, 0xef, - 0x4d, 0x93, 0x3e, 0xd9, 0xed, 0xae, 0xb7, 0xb2, 0x40, 0x56, 0x7f, 0x3b, 0x0a, 0xcb, 0x5d, 0x8e, - 0x3a, 0x75, 0xcb, 0x74, 0x28, 0xf9, 0xb5, 0x04, 0x8a, 0x1d, 0x6c, 0xe0, 0x27, 0x66, 0x15, 0xaf, - 0x51, 0x75, 0xb9, 0xef, 0xf2, 0xe6, 0x4d, 0xff, 0x62, 0xed, 0x05, 0x90, 0x2e, 0x74, 0x08, 0x17, - 0xb8, 0x2c, 0xbf, 0x71, 0x5f, 0x6e, 0x35, 0x53, 0x57, 0xec, 0xde, 0x1c, 0x82, 0xb5, 0xcb, 0x7d, - 0x58, 0x12, 0x36, 0x5c, 0x3e, 0x0b, 0xff, 0x42, 0xd2, 0xc2, 0x84, 0x45, 0x61, 0x34, 0xe2, 0x5e, - 0xe2, 0x2b, 0xe8, 0x79, 0x06, 0x86, 0x6b, 0x30, 0x4e, 0x6d, 0xdb, 0xb2, 0x45, 0x9d, 0x48, 0x10, - 0x59, 0x91, 0xa0, 0x7e, 0x88, 0x13, 0x54, 0x58, 0x1f, 0x39, 0x04, 0xc2, 0xa7, 0x37, 0xbe, 0xf6, - 0xc6, 0x37, 0xfe, 0x3d, 0x12, 0x9d, 0xe3, 0x5b, 0x60, 0x63, 0x26, 0xd9, 0x6a, 0xa6, 0x12, 0x38, - 0xa4, 0x05, 0x44, 0x31, 0xd2, 0xf1, 0xce, 0x3d, 0xf5, 0x4f, 0x93, 0x30, 0x8e, 0x17, 0x3c, 0xb9, - 0x0a, 0x63, 0x38, 0xf6, 0x73, 0xef, 0x70, 0xf4, 0x35, 0xc3, 0x23, 0x3f, 0xee, 0x93, 0x2c, 0xcc, - 0xfa, 0x89, 0x58, 0x3c, 0xd0, 0xca, 0xae, 0xe7, 0xa5, 0x94, 0xb9, 0xdc, 0x6a, 0xa6, 0x14, 0x7f, - 0xeb, 0x0e, 0xee, 0x08, 0xc2, 0x33, 0xe1, 0x1d, 0x72, 0x13, 0x64, 0xec, 0x53, 0x78, 0xdb, 0xe2, - 0xcd, 0x71, 0x58, 0x75, 0x19, 0x99, 0xb7, 0x1b, 0x62, 0xd5, 0x0d, 0xa8, 0xec, 0x38, 0x60, 0x77, - 0xe3, 0xcb, 0xf2, 0x91, 0x09, 0x8f, 0x03, 0xd2, 0xbb, 0x84, 0x65, 0x81, 0x4c, 0x2a, 0x30, 0xdb, - 0xbe, 0xd2, 0xab, 0x46, 0xcd, 0x70, 0xfd, 0xc7, 0xdd, 0x24, 0x06, 0x16, 0x83, 0xd1, 0xbe, 0xc3, - 0x1f, 0x20, 0x03, 0xcf, 0x66, 0x16, 0x5c, 0xc5, 0x0e, 0x6d, 0x84, 0x5a, 0x92, 0x99, 0xf0, 0x1e, - 0xf9, 0x9d, 0x04, 0x57, 0x3b, 0x34, 0x15, 0x4b, 0xa7, 0xed, 0x53, 0x5c, 0x2c, 0x57, 0x35, 0xc7, - 0xe1, 0x4f, 0x2d, 0x13, 0xc2, 0x53, 0x6f, 0x2f, 0x03, 0x32, 0xa7, 0xfe, 0x69, 0xde, 0x66, 0x42, - 0x79, 0xad, 0x46, 0xb9, 0x4d, 0xd7, 0xbc, 0x1b, 0xfe, 0x8a, 0x3d, 0x88, 0xbf, 0x30, 0x98, 0x85, - 0xec, 0x81, 0x5c, 0xa7, 0x76, 0xcd, 0x70, 0x1c, 0xec, 0xdc, 0xf9, 0x03, 0xf4, 0x92, 0x60, 0xd5, - 0x6e, 0xb0, 0xcb, 0xe3, 0x2d, 0xb0, 0x8b, 0xf1, 0x16, 0xc8, 0x89, 0x7f, 0x4a, 0x20, 0x0b, 0x72, - 0xa4, 0x00, 0x93, 0x4e, 0xa3, 0x74, 0x44, 0xcb, 0xed, 0x0a, 0x93, 0xec, 0xad, 0x21, 0xbd, 0xc7, - 0xd9, 0xbc, 0xee, 0xc1, 0x93, 0x09, 0x75, 0x0f, 0x1e, 0x0d, 0xcf, 0x38, 0xb5, 0x4b, 0xfc, 0x39, - 0xc6, 0x3f, 0xe3, 0x8c, 0x10, 0x3a, 0xe3, 0x8c, 0x90, 0x78, 0x0f, 0x26, 0x3c, 0x5c, 0x96, 0xf1, - 0xc7, 0x86, 0xa9, 0x8b, 0x19, 0xcf, 0xd6, 0x62, 0xc6, 0xb3, 0x75, 0xfb, 0x64, 0x8c, 0x9e, 0x7d, - 0x32, 0x12, 0x06, 0xcc, 0xf7, 0xc8, 0x9b, 0x67, 0xa8, 0x52, 0xd2, 0xc0, 0xbb, 0xf2, 0x63, 0x09, - 0xae, 0x0e, 0x97, 0x22, 0xc3, 0xa9, 0x7f, 0x4b, 0x54, 0xef, 0x0f, 0x53, 0x21, 0xc0, 0x0e, 0x6d, - 0x83, 0xca, 0xe8, 0x4f, 0xc7, 0xe1, 0xd2, 0x19, 0xf2, 0xac, 0xc9, 0x5e, 0xa9, 0x69, 0xdf, 0x37, - 0x6a, 0x8d, 0x5a, 0xd0, 0x61, 0x1f, 0xd8, 0x5a, 0x99, 0x15, 0x79, 0x2f, 0x2f, 0xbe, 0x35, 0xc8, - 0x8a, 0xf4, 0x43, 0x8e, 0xe0, 0x53, 0xef, 0x78, 0xf2, 0xfc, 0x6c, 0xa4, 0xbc, 0xb3, 0xb1, 0x5c, - 0xeb, 0xcd, 0x55, 0xe8, 0xb7, 0x41, 0x7e, 0x2f, 0xc1, 0x95, 0xbe, 0xc6, 0xe1, 0x19, 0xb6, 0xac, - 0x2a, 0xe6, 0x9a, 0xbc, 0xb9, 0xfd, 0xac, 0x46, 0x66, 0x4e, 0x77, 0x2d, 0xab, 0xea, 0x5d, 0x94, - 0x9e, 0xa9, 0x2f, 0xd4, 0xce, 0xe2, 0x2d, 0x9c, 0xbd, 0xcd, 0xae, 0xcb, 
0xb3, 0x02, 0x72, 0x51, - 0x89, 0xa8, 0x0e, 0x76, 0x70, 0x38, 0xd5, 0x8f, 0xc2, 0x49, 0xf8, 0x52, 0x77, 0x64, 0x31, 0x0a, - 0xe7, 0x4b, 0xc4, 0x3f, 0x8c, 0x42, 0x6a, 0x00, 0x06, 0xf9, 0x78, 0x88, 0x64, 0xdc, 0x1a, 0xc6, - 0x9a, 0x0b, 0x4a, 0xc8, 0x6f, 0xe2, 0xcb, 0xaa, 0x59, 0x88, 0x61, 0x49, 0x7e, 0x60, 0x38, 0x2e, - 0xb9, 0x01, 0x51, 0x6c, 0x45, 0xfd, 0x92, 0x0d, 0x41, 0xc9, 0xe6, 0xcd, 0x31, 0xdf, 0x15, 0x9b, - 0x63, 0x4e, 0x51, 0xf7, 0x81, 0xf0, 0x67, 0xc7, 0xaa, 0xd0, 0xbf, 0x91, 0xdb, 0x30, 0x5d, 0xe6, - 0x54, 0xaa, 0x0b, 0x7d, 0x36, 0xfe, 0x66, 0xd0, 0xde, 0x08, 0x77, 0xdb, 0x53, 0x22, 0x5d, 0xbd, - 0x09, 0xb3, 0xa8, 0xfd, 0x2e, 0x6d, 0x3f, 0x3b, 0x0f, 0xd9, 0xc0, 0xa8, 0xb7, 0x41, 0xd9, 0x73, - 0x6d, 0xaa, 0xd5, 0x0c, 0xb3, 0xd2, 0x89, 0xf1, 0x22, 0x44, 0xcc, 0x46, 0x0d, 0x21, 0xa6, 0x79, - 0x20, 0xcd, 0x46, 0x4d, 0x0c, 0xa4, 0xd9, 0xa8, 0xa9, 0x6f, 0x02, 0x41, 0xb9, 0x1d, 0x5a, 0xa5, - 0x2e, 0x3d, 0xaf, 0xfa, 0x4f, 0x24, 0x00, 0xfe, 0x4e, 0x99, 0x33, 0x0f, 0xac, 0xa1, 0xdb, 0xae, - 0x9b, 0x20, 0x63, 0x44, 0xf5, 0xe2, 0x91, 0x85, 0x17, 0x9d, 0xb4, 0x36, 0xce, 0xfb, 0x25, 0x4e, - 0xbe, 0x6f, 0x85, 0x6e, 0x3b, 0x08, 0xa8, 0x4c, 0xb4, 0x4a, 0x35, 0xc7, 0x17, 0x8d, 0x04, 0xa2, - 0x9c, 0xdc, 0x29, 0x1a, 0x50, 0xd5, 0x27, 0x30, 0x8f, 0xae, 0xee, 0xd7, 0x75, 0xcd, 0x0d, 0xc6, - 0x86, 0x37, 0xc4, 0x17, 0xfe, 0x70, 0x36, 0x9c, 0x35, 0xc7, 0x9c, 0xa3, 0x2d, 0x6e, 0x80, 0x92, - 0xd1, 0xdc, 0xf2, 0x61, 0x2f, 0xed, 0xef, 0xc1, 0xf4, 0x81, 0x66, 0x54, 0xfd, 0x97, 0x2f, 0x3f, - 0x27, 0x95, 0xc0, 0x8a, 0xb0, 0x00, 0x4f, 0x2b, 0x2e, 0xf2, 0x76, 0x67, 0x9e, 0x4e, 0x89, 0xf4, - 0xb6, 0xbf, 0xdb, 0xf8, 0x46, 0xf2, 0x4d, 0xf9, 0xdb, 0xa1, 0x7d, 0xb0, 0xbf, 0x61, 0x81, 0x73, - 0xf8, 0x2b, 0x43, 0x2c, 0x6b, 0xea, 0x0f, 0x35, 0xfb, 0x98, 0xda, 0xea, 0x47, 0x12, 0x2c, 0x86, - 0x4f, 0xc6, 0x43, 0xea, 0x38, 0x5a, 0x85, 0x92, 0xff, 0x3b, 0x9f, 0xff, 0xf7, 0x46, 0x82, 0x67, - 0xe8, 0x08, 0x35, 0x75, 0xaf, 0xa0, 0xcf, 0xa0, 0x58, 0x5b, 0x1f, 0x3f, 0x5f, 0x54, 0x6c, 0xb8, - 0xee, 0x8d, 0x14, 0x18, 0x7f, 0x66, 0x02, 0xc6, 0xe9, 0x09, 0x35, 0xdd, 0xf5, 0x04, 0xc8, 0xc2, - 0x6f, 0xb0, 0x44, 0x86, 0x09, 0x6f, 0x19, 0x1f, 0x59, 0xbf, 0x06, 0xb2, 0xf0, 0x63, 0x1d, 0x99, - 0x82, 0xc9, 0xbc, 0xa5, 0xd3, 0x5d, 0xcb, 0x76, 0xe3, 0x23, 0x6c, 0x75, 0x8f, 0x6a, 0x7a, 0x95, - 0xb1, 0x4a, 0xeb, 0xbf, 0x90, 0x60, 0xd2, 0x7f, 0xf8, 0x27, 0x00, 0xd1, 0xb7, 0xf7, 0xb3, 0xfb, - 0xd9, 0x9d, 0xf8, 0x08, 0x03, 0xdc, 0xcd, 0xe6, 0x77, 0x72, 0xf9, 0xbb, 0x71, 0x89, 0x2d, 0x0a, - 0xfb, 0xf9, 0x3c, 0x5b, 0x8c, 0x92, 0x69, 0x88, 0xed, 0xed, 0x6f, 0x6f, 0x67, 0xb3, 0x3b, 0xd9, - 0x9d, 0x78, 0x84, 0x09, 0xdd, 0xd9, 0xca, 0x3d, 0xc8, 0xee, 0xc4, 0xc7, 0x18, 0xdf, 0x7e, 0xfe, - 0xad, 0xfc, 0xa3, 0x77, 0xf3, 0xf1, 0x71, 0xce, 0x97, 0x79, 0x98, 0x7b, 0xfc, 0x38, 0xbb, 0x13, - 0x8f, 0x32, 0xbe, 0x07, 0xd9, 0xad, 0xbd, 0xec, 0x4e, 0x7c, 0x82, 0x6d, 0xed, 0x16, 0xb2, 0xd9, - 0x87, 0xbb, 0x6c, 0x6b, 0x92, 0x2d, 0xb7, 0xb7, 0xf2, 0xdb, 0xd9, 0x07, 0x0c, 0x25, 0xb6, 0xf9, - 0xf3, 0x18, 0x44, 0xf9, 0x08, 0x47, 0xde, 0x01, 0xe0, 0x7f, 0xe1, 0x71, 0x5d, 0xec, 0xf9, 0xfb, - 0x5c, 0x62, 0xa9, 0xf7, 0xdc, 0xa7, 0xae, 0xfc, 0xf8, 0x8f, 0x7f, 0xfb, 0xe5, 0xe8, 0xbc, 0x3a, - 0xb3, 0x71, 0x72, 0x7d, 0xe3, 0xc8, 0x2a, 0x79, 0xff, 0xc8, 0x74, 0x4b, 0x5a, 0x27, 0xef, 0x02, - 0xf0, 0xda, 0x1b, 0xc6, 0x0d, 0xfd, 0x0c, 0x94, 0x58, 0x46, 0x72, 0x77, 0x8d, 0xee, 0x06, 0xe6, - 0x05, 0x98, 0x01, 0x7f, 0x0f, 0xa6, 0xda, 0xc0, 0x7b, 0xd4, 0x25, 0x8a, 0xf0, 0xcb, 0x4e, 0x18, - 0x7d, 0xa9, 0xeb, 0x79, 0x32, 0xcb, 0x3e, 0xb4, 0x7a, 0x19, 0xc1, 0x97, 0xd4, 0x39, 0x0f, 0xdc, - 
0xa1, 0xae, 0x80, 0x6f, 0x42, 0x5c, 0x7c, 0x6d, 0x40, 0xf3, 0x2f, 0xf5, 0x7e, 0x87, 0xe0, 0x6a, - 0x2e, 0x9f, 0xf5, 0x48, 0xa1, 0xa6, 0x50, 0xd9, 0x8a, 0xba, 0xe0, 0x7b, 0x22, 0x3c, 0x38, 0x50, - 0xa6, 0xef, 0x3d, 0x90, 0xbd, 0xdf, 0x30, 0x51, 0x55, 0x3b, 0xd4, 0xe1, 0x1f, 0x36, 0xfb, 0x3a, - 0x93, 0x40, 0xfc, 0x05, 0x75, 0xd6, 0xc7, 0xaf, 0x73, 0x39, 0x06, 0x7d, 0x17, 0x64, 0x7e, 0x3a, - 0xf9, 0x94, 0x2d, 0x1c, 0x9d, 0xbe, 0x70, 0x0b, 0x08, 0x37, 0xa3, 0xc6, 0x18, 0x1c, 0x9e, 0x23, - 0x06, 0x54, 0x86, 0x29, 0x01, 0xc8, 0x21, 0x33, 0x01, 0x12, 0xbb, 0xa2, 0x13, 0x2f, 0xe0, 0xba, - 0x5f, 0x11, 0x51, 0x5f, 0x42, 0xd0, 0xa4, 0xba, 0xc2, 0x40, 0x4b, 0x8c, 0x8b, 0xea, 0x1b, 0xfc, - 0x29, 0xd8, 0x2b, 0x2b, 0x4c, 0x49, 0x1e, 0x64, 0x5e, 0x3b, 0x87, 0xb7, 0xf6, 0x12, 0x02, 0x2f, - 0x26, 0xe2, 0x6d, 0x6b, 0x37, 0x7e, 0xc8, 0x6e, 0xac, 0x0f, 0x3d, 0xa3, 0x05, 0xbc, 0xc1, 0x46, - 0x87, 0x0b, 0xb7, 0x6f, 0x74, 0x22, 0x64, 0x74, 0x03, 0x79, 0x04, 0xa3, 0xbf, 0x0d, 0x32, 0xbf, - 0x8a, 0xb9, 0xd1, 0xcb, 0x81, 0x8e, 0xd0, 0x0d, 0xdd, 0xd7, 0x03, 0x05, 0xb5, 0x90, 0xf5, 0x2e, - 0x0f, 0xc8, 0x1d, 0x98, 0xbc, 0x4b, 0x5d, 0x0e, 0xbb, 0x10, 0xc0, 0x06, 0x0d, 0x43, 0x42, 0x88, - 0x90, 0x8f, 0x43, 0xba, 0x71, 0x74, 0x88, 0xf9, 0x38, 0x0e, 0xe1, 0x3e, 0xf7, 0x6b, 0x41, 0x12, - 0x89, 0x1e, 0xdb, 0x5e, 0x1d, 0xf6, 0x13, 0x8d, 0x10, 0x31, 0x1e, 0x3c, 0x10, 0xaf, 0x49, 0xe4, - 0x16, 0x44, 0xef, 0xe1, 0x3f, 0xf4, 0x91, 0x3e, 0x9e, 0x26, 0xf8, 0x39, 0xe5, 0x4c, 0xdb, 0x87, - 0xb4, 0x7c, 0xdc, 0xbe, 0x32, 0xde, 0xff, 0xfc, 0xaf, 0xc9, 0x91, 0x1f, 0x3d, 0x4d, 0x4a, 0x9f, - 0x3e, 0x4d, 0x4a, 0x9f, 0x3d, 0x4d, 0x4a, 0x7f, 0x79, 0x9a, 0x94, 0x3e, 0xfa, 0x32, 0x39, 0xf2, - 0xd9, 0x97, 0xc9, 0x91, 0xcf, 0xbf, 0x4c, 0x8e, 0x7c, 0xe7, 0x7f, 0x84, 0xff, 0x31, 0xd4, 0xec, - 0x9a, 0xa6, 0x6b, 0x75, 0xdb, 0x62, 0x73, 0xb4, 0xb7, 0xda, 0xf0, 0xfe, 0xa9, 0xf0, 0x93, 0xd1, - 0x85, 0x2d, 0x24, 0xec, 0xf2, 0xed, 0x74, 0xce, 0x4a, 0x6f, 0xd5, 0x8d, 0x52, 0x14, 0x6d, 0x79, - 0xfd, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x8a, 0x17, 0x1c, 0x71, 0x29, 0x00, 0x00, + 0xd5, 0xe7, 0x88, 0x7a, 0xf1, 0x50, 0x0f, 0xea, 0xea, 0xe1, 0x11, 0xed, 0x88, 0xf2, 0x24, 0xf1, + 0x27, 0x0b, 0x09, 0x15, 0x2b, 0x5f, 0x50, 0xdb, 0x4d, 0xe1, 0x8a, 0x12, 0x6d, 0x4b, 0xb1, 0x65, + 0x85, 0xb2, 0x92, 0xa6, 0x28, 0xca, 0x0c, 0x39, 0x57, 0xd4, 0x48, 0xe4, 0x0c, 0x33, 0x33, 0x94, + 0xab, 0x16, 0x01, 0x8a, 0xa2, 0x68, 0x17, 0xdd, 0xa4, 0xe8, 0xae, 0x05, 0xd2, 0x45, 0x77, 0xe9, + 0xa2, 0x9b, 0xa2, 0xe8, 0x9f, 0x90, 0x65, 0x80, 0xa2, 0x40, 0xba, 0x61, 0x5a, 0xbb, 0x0f, 0x80, + 0xbb, 0xee, 0xbb, 0x28, 0xee, 0xb9, 0x77, 0x38, 0x77, 0xf8, 0x10, 0x45, 0xdb, 0x4a, 0x36, 0xdd, + 0xe9, 0xfe, 0xee, 0x79, 0xcf, 0xb9, 0xe7, 0x9e, 0x73, 0x29, 0x98, 0xa9, 0x1e, 0x95, 0x56, 0xf4, + 0xaa, 0xb9, 0xe2, 0xd6, 0x0a, 0x15, 0xd3, 0x4b, 0x57, 0x1d, 0xdb, 0xb3, 0x49, 0x54, 0xaf, 0x9a, + 0xc9, 0x8b, 0x25, 0xdb, 0x2e, 0x95, 0xe9, 0x0a, 0x42, 0x85, 0xda, 0xfe, 0x0a, 0xad, 0x54, 0xbd, + 0x13, 0x4e, 0x91, 0x4c, 0xb5, 0x6e, 0x7a, 0x66, 0x85, 0xba, 0x9e, 0x5e, 0xa9, 0x0a, 0x02, 0xed, + 0xe8, 0xba, 0x9b, 0x36, 0x6d, 0x94, 0x5d, 0xb4, 0x1d, 0xba, 0x72, 0x7c, 0x6d, 0xa5, 0x44, 0x2d, + 0xea, 0xe8, 0x1e, 0x35, 0x04, 0xcd, 0x92, 0x44, 0x63, 0x51, 0xef, 0x91, 0xed, 0x1c, 0x99, 0x56, + 0xa9, 0x13, 0xe5, 0x25, 0xa1, 0x8e, 0x51, 0xea, 0x96, 0x65, 0x7b, 0xba, 0x67, 0xda, 0x96, 0x2b, + 0x76, 0x5f, 0x2d, 0x99, 0xde, 0x41, 0xad, 0x90, 0x2e, 0xda, 0x95, 0x95, 0x92, 0x5d, 0xb2, 0x03, + 0xab, 0xd8, 0x0a, 0x17, 0xf8, 0x97, 0x20, 0x6f, 0xfa, 0x7c, 0x40, 0xf5, 0xb2, 0x77, 0xc0, 0x51, + 0xed, 0xd7, 0x31, 0x98, 0xd9, 
0xb2, 0x0b, 0xbb, 0x18, 0x87, 0x1c, 0xfd, 0xa0, 0x46, 0x5d, 0x6f, + 0xd3, 0xa3, 0x15, 0xb2, 0x0a, 0xa3, 0x55, 0xc7, 0xb4, 0x1d, 0xd3, 0x3b, 0x51, 0x95, 0x45, 0x65, + 0x49, 0xc9, 0xcc, 0x35, 0xea, 0x29, 0xe2, 0x63, 0xaf, 0xd8, 0x15, 0xd3, 0xc3, 0xd0, 0xe4, 0x9a, + 0x74, 0xe4, 0x0d, 0x88, 0x59, 0x7a, 0x85, 0xba, 0x55, 0xbd, 0x48, 0xd5, 0xe8, 0xa2, 0xb2, 0x14, + 0xcb, 0x5c, 0x68, 0xd4, 0x53, 0xd3, 0x4d, 0x50, 0xe2, 0x0a, 0x28, 0xc9, 0xeb, 0x10, 0x2b, 0x96, + 0x4d, 0x6a, 0x79, 0x79, 0xd3, 0x50, 0x47, 0x91, 0x0d, 0x75, 0x71, 0x70, 0xd3, 0x90, 0x75, 0xf9, + 0x18, 0xd9, 0x85, 0xe1, 0xb2, 0x5e, 0xa0, 0x65, 0x57, 0x1d, 0x5c, 0x8c, 0x2e, 0xc5, 0x57, 0x5f, + 0x4e, 0xeb, 0x55, 0x33, 0xdd, 0xc9, 0x95, 0xf4, 0x3d, 0xa4, 0xcb, 0x5a, 0x9e, 0x73, 0x92, 0x99, + 0x69, 0xd4, 0x53, 0x09, 0xce, 0x28, 0x89, 0x15, 0xa2, 0x48, 0x09, 0xe2, 0x52, 0x9c, 0xd5, 0x21, + 0x94, 0xbc, 0xdc, 0x5d, 0xf2, 0x5a, 0x40, 0xcc, 0xc5, 0xcf, 0x37, 0xea, 0xa9, 0x59, 0x49, 0x84, + 0xa4, 0x43, 0x96, 0x4c, 0x7e, 0xaa, 0xc0, 0x8c, 0x43, 0x3f, 0xa8, 0x99, 0x0e, 0x35, 0xf2, 0x96, + 0x6d, 0xd0, 0xbc, 0x70, 0x66, 0x18, 0x55, 0x5e, 0xeb, 0xae, 0x32, 0x27, 0xb8, 0xb6, 0x6d, 0x83, + 0xca, 0x8e, 0x69, 0x8d, 0x7a, 0xea, 0x92, 0xd3, 0xb6, 0x19, 0x18, 0xa0, 0x2a, 0x39, 0xd2, 0xbe, + 0x4f, 0x1e, 0xc0, 0x68, 0xd5, 0x36, 0xf2, 0x6e, 0x95, 0x16, 0xd5, 0x81, 0x45, 0x65, 0x29, 0xbe, + 0x7a, 0x31, 0xcd, 0x13, 0x14, 0x6d, 0x60, 0x49, 0x9c, 0x3e, 0xbe, 0x96, 0xde, 0xb1, 0x8d, 0xdd, + 0x2a, 0x2d, 0xe2, 0xf7, 0x9c, 0xaa, 0xf2, 0x45, 0x48, 0xf6, 0x88, 0x00, 0xc9, 0x0e, 0xc4, 0x7c, + 0x81, 0xae, 0x3a, 0x82, 0xee, 0x9c, 0x2a, 0x91, 0xa7, 0x15, 0x5f, 0xb8, 0xa1, 0xb4, 0x12, 0x18, + 0x59, 0x87, 0x11, 0xd3, 0x2a, 0x39, 0xd4, 0x75, 0xd5, 0x18, 0xca, 0x23, 0x28, 0x68, 0x93, 0x63, + 0xeb, 0xb6, 0xb5, 0x6f, 0x96, 0x32, 0xb3, 0xcc, 0x30, 0x41, 0x26, 0x49, 0xf1, 0x39, 0xc9, 0x6d, + 0x18, 0x75, 0xa9, 0x73, 0x6c, 0x16, 0xa9, 0xab, 0x82, 0x24, 0x65, 0x97, 0x83, 0x42, 0x0a, 0x1a, + 0xe3, 0xd3, 0xc9, 0xc6, 0xf8, 0x18, 0xcb, 0x71, 0xb7, 0x78, 0x40, 0x8d, 0x5a, 0x99, 0x3a, 0x6a, + 0x3c, 0xc8, 0xf1, 0x26, 0x28, 0xe7, 0x78, 0x13, 0x4c, 0xea, 0x10, 0x97, 0xbe, 0x16, 0x79, 0x11, + 0xa2, 0x47, 0x94, 0x1f, 0xac, 0x58, 0x66, 0xaa, 0x51, 0x4f, 0x8d, 0x1f, 0x51, 0xf9, 0x4c, 0xb1, + 0x5d, 0x72, 0x15, 0x86, 0x8e, 0xf5, 0x72, 0x8d, 0xe2, 0x77, 0x89, 0x65, 0xa6, 0x1b, 0xf5, 0xd4, + 0x24, 0x02, 0x12, 0x21, 0xa7, 0xb8, 0x39, 0x70, 0x5d, 0x49, 0xee, 0x43, 0xa2, 0x35, 0x1f, 0xcf, + 0x45, 0x4f, 0x05, 0x2e, 0x74, 0x49, 0xc2, 0xf3, 0x50, 0xa7, 0xfd, 0x3b, 0x0a, 0xe3, 0xa1, 0x4f, + 0x4d, 0x6e, 0xc2, 0xa0, 0x77, 0x52, 0xa5, 0xa8, 0x66, 0x62, 0x35, 0x21, 0x27, 0xc3, 0xc3, 0x93, + 0x2a, 0xc5, 0x33, 0x3e, 0xc1, 0x28, 0x42, 0x09, 0x8a, 0x3c, 0x4c, 0x79, 0xd5, 0x76, 0x3c, 0x57, + 0x1d, 0x58, 0x8c, 0x2e, 0x8d, 0x73, 0xe5, 0x08, 0xc8, 0xca, 0x11, 0x20, 0xef, 0x87, 0x8b, 0x41, + 0x14, 0x93, 0xe6, 0xc5, 0xf6, 0xd4, 0x7b, 0xfa, 0x2a, 0x70, 0x03, 0xe2, 0x5e, 0xd9, 0xcd, 0x53, + 0x4b, 0x2f, 0x94, 0xa9, 0xa1, 0x0e, 0x2e, 0x2a, 0x4b, 0xa3, 0x19, 0xb5, 0x51, 0x4f, 0xcd, 0x78, + 0x2c, 0xa2, 0x88, 0x4a, 0xbc, 0x10, 0xa0, 0x58, 0x33, 0xa9, 0xe3, 0xe5, 0x59, 0x15, 0x55, 0x87, + 0xa4, 0x9a, 0x49, 0x1d, 0x6f, 0x5b, 0xaf, 0xd0, 0x50, 0xcd, 0x14, 0x18, 0xb9, 0x05, 0xe3, 0x35, + 0x97, 0xe6, 0x8b, 0xe5, 0x9a, 0xeb, 0x51, 0x67, 0x73, 0x47, 0x1d, 0x46, 0x8d, 0xc9, 0x46, 0x3d, + 0x35, 0x57, 0x73, 0xe9, 0xba, 0x8f, 0x4b, 0xcc, 0x63, 0x32, 0xfe, 0x65, 0xa5, 0x98, 0xe6, 0xc1, + 0x78, 0xe8, 0x5c, 0x92, 0xeb, 0x1d, 0x3e, 0xb9, 0xa0, 0xc0, 0x4f, 0x4e, 0xda, 0x3f, 0x79, 0xdf, + 0x1f, 0x5c, 0xfb, 0x8b, 0x02, 0x89, 0xd6, 0x9a, 0xcb, 
0xf8, 0x3f, 0xa8, 0xd1, 0x1a, 0x15, 0x0e, + 0x22, 0x3f, 0x02, 0x32, 0x3f, 0x02, 0xe4, 0xff, 0x01, 0x0e, 0xed, 0x42, 0xde, 0xa5, 0x78, 0x91, + 0x0d, 0x04, 0x1f, 0xe5, 0xd0, 0x2e, 0xec, 0xd2, 0x96, 0x8b, 0xcc, 0xc7, 0x88, 0x01, 0x53, 0x8c, + 0xcb, 0xe1, 0xfa, 0xf2, 0x8c, 0xc0, 0x4f, 0xb6, 0xf9, 0xae, 0xd7, 0x40, 0xe6, 0x85, 0x46, 0x3d, + 0x35, 0x7f, 0x68, 0x17, 0x24, 0x4c, 0xf6, 0x68, 0xb2, 0x65, 0x4b, 0xfb, 0x8d, 0x02, 0x53, 0x5b, + 0x76, 0x61, 0xc7, 0xa1, 0x8c, 0xe0, 0x4b, 0x73, 0xee, 0x55, 0x18, 0x61, 0x5c, 0xa6, 0xc1, 0x5d, + 0x8a, 0xf1, 0xfb, 0xf7, 0xd0, 0x2e, 0x6c, 0x1a, 0xa1, 0xfb, 0x97, 0x23, 0xda, 0x7f, 0xf8, 0x17, + 0x58, 0xd7, 0xad, 0x22, 0x2d, 0xfb, 0x46, 0x2e, 0xc3, 0x30, 0x97, 0x21, 0x5b, 0x89, 0x0c, 0xb2, + 0x95, 0x08, 0x3c, 0xa5, 0x95, 0xcd, 0x30, 0x44, 0x7b, 0x86, 0x41, 0x72, 0x68, 0xb0, 0xb7, 0x43, + 0xe4, 0x15, 0x18, 0x76, 0xa8, 0xee, 0xda, 0x96, 0x38, 0xa3, 0x48, 0xcd, 0x11, 0x99, 0x9a, 0x23, + 0xda, 0x3f, 0x14, 0x98, 0xde, 0x42, 0xa3, 0xc2, 0x11, 0x08, 0x7b, 0xa5, 0xf4, 0xeb, 0xd5, 0x40, + 0x4f, 0xaf, 0x6e, 0xc1, 0xf0, 0xbe, 0x59, 0xf6, 0xa8, 0x83, 0x11, 0x88, 0xaf, 0x4e, 0x35, 0x13, + 0x8f, 0x7a, 0xb7, 0x71, 0x83, 0x5b, 0xce, 0x89, 0x64, 0xcb, 0x39, 0x22, 0xf9, 0x39, 0x78, 0x06, + 0x3f, 0xdf, 0x82, 0x31, 0x59, 0x36, 0xf9, 0x3a, 0x0c, 0xbb, 0x9e, 0xee, 0x51, 0x57, 0x55, 0x16, + 0xa3, 0x4b, 0x13, 0xab, 0xe3, 0x4d, 0xf5, 0x0c, 0xe5, 0xc2, 0x38, 0x81, 0x2c, 0x8c, 0x23, 0xda, + 0x17, 0x93, 0x10, 0xdd, 0xb2, 0x0b, 0x64, 0x11, 0x06, 0x9a, 0xc1, 0x49, 0x34, 0xea, 0xa9, 0x31, + 0x53, 0x0e, 0xcb, 0x80, 0x69, 0x84, 0xfb, 0xcc, 0xf1, 0x33, 0xf6, 0x99, 0xe7, 0x9e, 0x51, 0xa1, + 0xa6, 0x79, 0xe4, 0xcc, 0x4d, 0x73, 0xa6, 0xd9, 0xff, 0xf2, 0x9e, 0x68, 0xc6, 0x8f, 0x59, 0x1f, + 0xed, 0xee, 0x3b, 0xe1, 0x1b, 0x0e, 0xc2, 0x45, 0xe7, 0xe9, 0xef, 0xb5, 0xe3, 0x2e, 0xcd, 0x6d, + 0x1c, 0x15, 0x2c, 0x36, 0x15, 0x3c, 0xef, 0x5e, 0xf6, 0x2a, 0x0c, 0xd9, 0x8f, 0x2c, 0xea, 0x88, + 0x21, 0x02, 0xa3, 0x8e, 0x80, 0x1c, 0x75, 0x04, 0x08, 0x85, 0x8b, 0x18, 0xfe, 0x3c, 0x2e, 0xdd, + 0x03, 0xb3, 0x9a, 0xaf, 0xb9, 0xd4, 0xc9, 0x97, 0x1c, 0xbb, 0x56, 0x75, 0xd5, 0x49, 0x3c, 0xdb, + 0x57, 0x1a, 0xf5, 0x94, 0x86, 0x64, 0x0f, 0x7c, 0xaa, 0x3d, 0x97, 0x3a, 0x77, 0x90, 0x46, 0x92, + 0xa9, 0x76, 0xa3, 0x21, 0x3f, 0x56, 0xe0, 0x4a, 0xd1, 0xae, 0x54, 0x59, 0xb7, 0x40, 0x8d, 0xfc, + 0x69, 0x2a, 0xa7, 0x17, 0x95, 0xa5, 0xb1, 0xcc, 0x6b, 0x8d, 0x7a, 0xea, 0x95, 0x80, 0xe3, 0xed, + 0xde, 0xca, 0xb5, 0xde, 0xd4, 0xa1, 0x61, 0x6e, 0xf0, 0x8c, 0xc3, 0x9c, 0x3c, 0x18, 0x0c, 0x3d, + 0xf7, 0xc1, 0x60, 0xec, 0x79, 0x0c, 0x06, 0xbf, 0x54, 0x60, 0x51, 0xb4, 0xd8, 0xa6, 0x55, 0xca, + 0x3b, 0xd4, 0xb5, 0x6b, 0x4e, 0x91, 0xe6, 0x45, 0x6a, 0x54, 0xa8, 0xe5, 0xb9, 0xea, 0x2c, 0xda, + 0xbe, 0xd4, 0x49, 0x53, 0x4e, 0x30, 0xe4, 0x24, 0xfa, 0xcc, 0x95, 0x4f, 0xeb, 0xa9, 0x48, 0xa3, + 0x9e, 0x5a, 0x08, 0x24, 0x77, 0xa2, 0xcb, 0xf5, 0xd8, 0x27, 0x9b, 0x30, 0x52, 0x74, 0x28, 0x9b, + 0xe6, 0xb1, 0xcd, 0x8a, 0xaf, 0x26, 0xd3, 0x7c, 0x9c, 0x4f, 0xfb, 0x73, 0x7a, 0xfa, 0xa1, 0xff, + 0x7a, 0x90, 0x99, 0x16, 0x4a, 0x7d, 0x96, 0x8f, 0xbe, 0x48, 0x29, 0x39, 0x7f, 0x21, 0x0f, 0x40, + 0x13, 0xcf, 0x65, 0x00, 0x4a, 0x3c, 0xc3, 0x00, 0xf4, 0x1d, 0x88, 0x1f, 0x5d, 0x77, 0xf3, 0xbe, + 0x41, 0x53, 0x28, 0xea, 0xb2, 0x1c, 0xde, 0xe0, 0x51, 0x83, 0x05, 0x59, 0x58, 0xc9, 0xfb, 0xda, + 0xa3, 0xeb, 0xee, 0x66, 0x9b, 0x89, 0x10, 0xa0, 0xac, 0x24, 0x31, 0xe9, 0x42, 0x9b, 0x4a, 0xba, + 0xa7, 0x89, 0xb0, 0xbb, 0x29, 0x57, 0xac, 0x5b, 0xe4, 0x0a, 0x34, 0x3c, 0xb6, 0xcd, 0x9c, 0x75, + 0x6c, 0x23, 0x9b, 0x30, 0xc5, 0xcf, 0xac, 0xe7, 0x95, 0xf3, 0x2e, 0x2d, 0xda, 
0x96, 0xe1, 0xaa, + 0x73, 0x8b, 0xca, 0x52, 0x94, 0x77, 0x60, 0xb8, 0xf9, 0xd0, 0x2b, 0xef, 0xf2, 0x2d, 0xb9, 0x03, + 0x6b, 0xd9, 0xfa, 0xdf, 0x04, 0xf8, 0xd4, 0xd3, 0xc0, 0x3f, 0x15, 0x98, 0xdb, 0x62, 0xfd, 0xac, + 0xa8, 0x4d, 0xe6, 0xf7, 0xa9, 0xdf, 0x19, 0x49, 0xed, 0x98, 0x72, 0x86, 0x76, 0xec, 0xdc, 0x2f, + 0xf3, 0x37, 0x61, 0xcc, 0xa2, 0x8f, 0xf2, 0x2d, 0xc5, 0x16, 0xef, 0x4d, 0x8b, 0x3e, 0xda, 0x69, + 0xaf, 0xb7, 0x71, 0x09, 0xd6, 0x7e, 0x3b, 0x00, 0x17, 0xda, 0x1c, 0x75, 0xab, 0xb6, 0xe5, 0x52, + 0xf2, 0x2b, 0x05, 0x54, 0x27, 0xd8, 0xc0, 0x4f, 0xcc, 0x2a, 0x5e, 0xad, 0xec, 0x71, 0xdf, 0xe3, + 0xab, 0x37, 0xfc, 0x8b, 0xb5, 0x93, 0x80, 0x74, 0xae, 0x85, 0x39, 0xc7, 0x79, 0xf9, 0x8d, 0xfb, + 0x72, 0xa3, 0x9e, 0xba, 0xec, 0x74, 0xa6, 0x90, 0xac, 0xbd, 0xd0, 0x85, 0x24, 0xe9, 0xc0, 0xa5, + 0xd3, 0xe4, 0x9f, 0x4b, 0x5a, 0x58, 0x30, 0x2b, 0x8d, 0x46, 0xdc, 0x4b, 0x7c, 0xba, 0xec, 0x67, + 0x60, 0xb8, 0x0a, 0x43, 0xd4, 0x71, 0x6c, 0x47, 0xd6, 0x89, 0x80, 0x4c, 0x8a, 0x80, 0xf6, 0x21, + 0x4e, 0x50, 0x61, 0x7d, 0xe4, 0x00, 0x08, 0x9f, 0xde, 0xf8, 0x5a, 0x8c, 0x6f, 0xfc, 0x7b, 0x24, + 0x5b, 0xc7, 0xb7, 0xc0, 0xc6, 0xcc, 0x42, 0xa3, 0x9e, 0x4a, 0xe2, 0x90, 0x16, 0x80, 0x72, 0xa4, + 0x13, 0xad, 0x7b, 0xda, 0x9f, 0x47, 0x61, 0x08, 0x2f, 0x78, 0x72, 0x05, 0x06, 0x71, 0xec, 0xe7, + 0xde, 0xe1, 0xe8, 0x6b, 0x85, 0x47, 0x7e, 0xdc, 0x27, 0x59, 0x98, 0xf4, 0x13, 0x31, 0xbf, 0xaf, + 0x17, 0x3d, 0xe1, 0xa5, 0x92, 0xb9, 0xd4, 0xa8, 0xa7, 0x54, 0x7f, 0xeb, 0x36, 0xee, 0x48, 0xcc, + 0x13, 0xe1, 0x1d, 0x72, 0x03, 0xe2, 0xd8, 0xa7, 0xf0, 0xb6, 0x45, 0xcc, 0x71, 0x58, 0x75, 0x19, + 0xcc, 0xdb, 0x0d, 0xb9, 0xea, 0x06, 0x28, 0x3b, 0x0e, 0xd8, 0xdd, 0xf8, 0xbc, 0x7c, 0x64, 0xc2, + 0xe3, 0x80, 0x78, 0x1b, 0x73, 0x5c, 0x82, 0x49, 0x09, 0x26, 0x9b, 0x57, 0x7a, 0xd9, 0xac, 0x98, + 0x9e, 0xff, 0x22, 0xbb, 0x80, 0x81, 0xc5, 0x60, 0x34, 0xef, 0xf0, 0x7b, 0x48, 0xc0, 0xb3, 0x99, + 0x05, 0x57, 0x75, 0x42, 0x1b, 0xa1, 0x96, 0x64, 0x22, 0xbc, 0x47, 0x7e, 0xa7, 0xc0, 0x95, 0x16, + 0x4d, 0xf9, 0xc2, 0x49, 0xf3, 0x14, 0xe7, 0x8b, 0x65, 0xdd, 0x75, 0xf9, 0x53, 0xcb, 0x88, 0xf4, + 0x3e, 0xdb, 0xc9, 0x80, 0xcc, 0x89, 0x7f, 0x9a, 0xd7, 0x19, 0xd3, 0xb6, 0x5e, 0xa1, 0xdc, 0xa6, + 0xab, 0xe2, 0x86, 0xbf, 0xec, 0xf4, 0xa2, 0xcf, 0xf5, 0x26, 0x21, 0xbb, 0x10, 0xaf, 0x52, 0xa7, + 0x62, 0xba, 0x2e, 0x76, 0xee, 0xfc, 0xd5, 0x78, 0x4e, 0xb2, 0x6a, 0x27, 0xd8, 0xe5, 0xf1, 0x96, + 0xc8, 0xe5, 0x78, 0x4b, 0x70, 0xf2, 0x5f, 0x0a, 0xc4, 0x25, 0x3e, 0x92, 0x83, 0x51, 0xb7, 0x56, + 0x38, 0xa4, 0xc5, 0x66, 0x85, 0x59, 0xe8, 0xac, 0x21, 0xbd, 0xcb, 0xc9, 0x44, 0xf7, 0x20, 0x78, + 0x42, 0xdd, 0x83, 0xc0, 0xf0, 0x8c, 0x53, 0xa7, 0xc0, 0x9f, 0x63, 0xfc, 0x33, 0xce, 0x80, 0xd0, + 0x19, 0x67, 0x40, 0xf2, 0x3d, 0x18, 0x11, 0x72, 0x59, 0xc6, 0x1f, 0x99, 0x96, 0x21, 0x67, 0x3c, + 0x5b, 0xcb, 0x19, 0xcf, 0xd6, 0xcd, 0x93, 0x31, 0x70, 0xfa, 0xc9, 0x48, 0x9a, 0x30, 0xdd, 0x21, + 0x6f, 0x9e, 0xa2, 0x4a, 0x29, 0x3d, 0xef, 0xca, 0x8f, 0x15, 0xb8, 0x72, 0xb6, 0x14, 0x39, 0x9b, + 0xfa, 0xb7, 0x64, 0xf5, 0xfe, 0x30, 0x15, 0x12, 0xd8, 0xa2, 0xad, 0x57, 0x19, 0xfd, 0xc9, 0x10, + 0x5c, 0x3c, 0x85, 0x9f, 0x35, 0xd9, 0xf3, 0x15, 0xfd, 0x7b, 0x66, 0xa5, 0x56, 0x09, 0x3a, 0xec, + 0x7d, 0x47, 0x2f, 0xb2, 0x22, 0x2f, 0xf2, 0xe2, 0x1b, 0xbd, 0xac, 0x48, 0xdf, 0xe7, 0x12, 0x7c, + 0xf4, 0xb6, 0xe0, 0xe7, 0x67, 0x23, 0x25, 0xce, 0xc6, 0x85, 0x4a, 0x67, 0xaa, 0x5c, 0xb7, 0x0d, + 0xf2, 0x7b, 0x05, 0x2e, 0x77, 0x35, 0x0e, 0xcf, 0xb0, 0x6d, 0x97, 0x31, 0xd7, 0xe2, 0xab, 0xeb, + 0x4f, 0x6b, 0x64, 0xe6, 0x64, 0xc7, 0xb6, 0xcb, 0xe2, 0xa2, 0x14, 0xa6, 0xbe, 0x50, 0x39, 0x8d, + 0x36, 
0x77, 0xfa, 0x36, 0xbb, 0x2e, 0x4f, 0x0b, 0xc8, 0x79, 0x25, 0xa2, 0xd6, 0xdb, 0xc1, 0xb3, + 0xa9, 0x7e, 0x10, 0x4e, 0xc2, 0x97, 0xda, 0x23, 0x8b, 0x51, 0xe8, 0x2f, 0x11, 0xff, 0x30, 0x00, + 0xa9, 0x1e, 0x32, 0xc8, 0xc7, 0x67, 0x48, 0xc6, 0xb5, 0xb3, 0x58, 0x73, 0x4e, 0x09, 0xf9, 0x55, + 0x7c, 0x59, 0x2d, 0x0b, 0x31, 0x2c, 0xc9, 0xf7, 0x4c, 0xd7, 0x23, 0xd7, 0x61, 0x18, 0x5b, 0x51, + 0xbf, 0x64, 0x43, 0x50, 0xb2, 0x79, 0x73, 0xcc, 0x77, 0xe5, 0xe6, 0x98, 0x23, 0xda, 0x1e, 0x10, + 0xfe, 0xec, 0x58, 0x96, 0xfa, 0x37, 0x72, 0x0b, 0xc6, 0x8b, 0x1c, 0xa5, 0x86, 0xd4, 0x67, 0xe3, + 0x6f, 0x06, 0xcd, 0x8d, 0x70, 0xb7, 0x3d, 0x26, 0xe3, 0xda, 0x0d, 0x98, 0x44, 0xed, 0x77, 0x68, + 0xf3, 0xd9, 0xf9, 0x8c, 0x0d, 0x8c, 0x76, 0x0b, 0xd4, 0x5d, 0xcf, 0xa1, 0x7a, 0xc5, 0xb4, 0x4a, + 0xad, 0x32, 0x5e, 0x84, 0xa8, 0x55, 0xab, 0xa0, 0x88, 0x71, 0x1e, 0x48, 0xab, 0x56, 0x91, 0x03, + 0x69, 0xd5, 0x2a, 0xda, 0x9b, 0x40, 0x90, 0x6f, 0x83, 0x96, 0xa9, 0x47, 0xfb, 0x55, 0xff, 0x89, + 0x02, 0xc0, 0xdf, 0x29, 0x37, 0xad, 0x7d, 0xfb, 0xcc, 0x6d, 0xd7, 0x0d, 0x88, 0x63, 0x44, 0x8d, + 0xfc, 0xa1, 0x8d, 0x17, 0x9d, 0xb2, 0x34, 0xc4, 0xfb, 0x25, 0x0e, 0x6f, 0xd9, 0xa1, 0xdb, 0x0e, + 0x02, 0x94, 0xb1, 0x96, 0xa9, 0xee, 0xfa, 0xac, 0xd1, 0x80, 0x95, 0xc3, 0xad, 0xac, 0x01, 0xaa, + 0x3d, 0x82, 0x69, 0x74, 0x75, 0xaf, 0x6a, 0xe8, 0x5e, 0x30, 0x36, 0xbc, 0x21, 0xbf, 0xf0, 0x87, + 0xb3, 0xe1, 0xb4, 0x39, 0xa6, 0x8f, 0xb6, 0xb8, 0x06, 0x6a, 0x46, 0xf7, 0x8a, 0x07, 0x9d, 0xb4, + 0xbf, 0x07, 0xe3, 0xfb, 0xba, 0x59, 0xf6, 0x5f, 0xbe, 0xfc, 0x9c, 0x54, 0x03, 0x2b, 0xc2, 0x0c, + 0x3c, 0xad, 0x38, 0xcb, 0xdb, 0xad, 0x79, 0x3a, 0x26, 0xe3, 0x4d, 0x7f, 0xd7, 0xf1, 0x8d, 0xe4, + 0xab, 0xf2, 0xb7, 0x45, 0x7b, 0x6f, 0x7f, 0xc3, 0x0c, 0x7d, 0xf8, 0x1b, 0x87, 0x58, 0xd6, 0x32, + 0xee, 0xeb, 0xce, 0x11, 0x75, 0xb4, 0x8f, 0x14, 0x98, 0x0d, 0x9f, 0x8c, 0xfb, 0xd4, 0x75, 0xf5, + 0x12, 0x25, 0x5f, 0xeb, 0xcf, 0xff, 0xbb, 0x91, 0xe0, 0x19, 0x3a, 0x4a, 0x2d, 0x43, 0x14, 0xf4, + 0x09, 0x64, 0x6b, 0xea, 0xe3, 0xe7, 0x8b, 0xca, 0x0d, 0xd7, 0xdd, 0x48, 0x8e, 0xd1, 0x67, 0x46, + 0x60, 0x88, 0x1e, 0x53, 0xcb, 0x5b, 0x4e, 0x42, 0x5c, 0xfa, 0x0d, 0x96, 0xc4, 0x61, 0x44, 0x2c, + 0x13, 0x91, 0xe5, 0xab, 0x10, 0x97, 0x7e, 0xac, 0x23, 0x63, 0x30, 0xba, 0x6d, 0x1b, 0x74, 0xc7, + 0x76, 0xbc, 0x44, 0x84, 0xad, 0xee, 0x52, 0xdd, 0x28, 0x33, 0x52, 0x65, 0xf9, 0xe7, 0x0a, 0x8c, + 0xfa, 0x0f, 0xff, 0x04, 0x60, 0xf8, 0xed, 0xbd, 0xec, 0x5e, 0x76, 0x23, 0x11, 0x61, 0x02, 0x77, + 0xb2, 0xdb, 0x1b, 0x9b, 0xdb, 0x77, 0x12, 0x0a, 0x5b, 0xe4, 0xf6, 0xb6, 0xb7, 0xd9, 0x62, 0x80, + 0x8c, 0x43, 0x6c, 0x77, 0x6f, 0x7d, 0x3d, 0x9b, 0xdd, 0xc8, 0x6e, 0x24, 0xa2, 0x8c, 0xe9, 0xf6, + 0xda, 0xe6, 0xbd, 0xec, 0x46, 0x62, 0x90, 0xd1, 0xed, 0x6d, 0xbf, 0xb5, 0xfd, 0xe0, 0xdd, 0xed, + 0xc4, 0x10, 0xa7, 0xcb, 0xdc, 0xdf, 0x7c, 0xf8, 0x30, 0xbb, 0x91, 0x18, 0x66, 0x74, 0xf7, 0xb2, + 0x6b, 0xbb, 0xd9, 0x8d, 0xc4, 0x08, 0xdb, 0xda, 0xc9, 0x65, 0xb3, 0xf7, 0x77, 0xd8, 0xd6, 0x28, + 0x5b, 0xae, 0xaf, 0x6d, 0xaf, 0x67, 0xef, 0x31, 0x29, 0xb1, 0xd5, 0x3f, 0x46, 0x61, 0x0c, 0x43, + 0xe8, 0x3f, 0x26, 0xbd, 0x0e, 0x71, 0xfe, 0x1d, 0xf9, 0x3c, 0x26, 0x05, 0x39, 0x39, 0xd7, 0xf6, + 0xc8, 0x97, 0x65, 0xe1, 0xd2, 0x22, 0xe4, 0x16, 0x8c, 0x49, 0x4c, 0x2e, 0x99, 0x08, 0xb8, 0x58, + 0xe1, 0x4e, 0xbe, 0x80, 0xeb, 0x6e, 0xa9, 0xa5, 0x45, 0x98, 0x56, 0x7e, 0x5a, 0xfa, 0xd4, 0x2a, + 0x31, 0xf5, 0xd6, 0x1a, 0x3e, 0x8f, 0x5a, 0x84, 0x7c, 0x13, 0xe2, 0xbc, 0x7a, 0x72, 0xad, 0x17, + 0x02, 0xfe, 0x50, 0x51, 0x3d, 0xc5, 0x84, 0x34, 0x8c, 0xde, 0xa1, 0x1e, 0x67, 0x9f, 0x09, 0xd8, + 0x83, 0x5a, 0x9e, 0x94, 0x5c, 
0xd1, 0x22, 0x64, 0x0b, 0x62, 0x3e, 0xbd, 0x4b, 0xb8, 0x7d, 0xdd, + 0x6e, 0x81, 0x64, 0xb2, 0xc3, 0xb6, 0x38, 0x0a, 0x5a, 0xe4, 0x35, 0x65, 0xf5, 0x67, 0x31, 0x18, + 0xe6, 0xd3, 0x37, 0x79, 0x07, 0x80, 0xff, 0x85, 0x95, 0x76, 0xb6, 0xe3, 0x4f, 0xab, 0xc9, 0xb9, + 0xce, 0x23, 0xbb, 0x36, 0xff, 0xa3, 0x3f, 0xfd, 0xfd, 0x17, 0x03, 0xd3, 0xda, 0xc4, 0xca, 0xf1, + 0xb5, 0x95, 0x43, 0xbb, 0x20, 0xfe, 0x71, 0xec, 0xa6, 0xb2, 0x4c, 0xde, 0x05, 0xe0, 0xd7, 0x66, + 0x58, 0x6e, 0xe8, 0x17, 0xbc, 0x24, 0x0f, 0x5b, 0xfb, 0xf5, 0xda, 0x2e, 0x98, 0xdf, 0x9d, 0x4c, + 0xf0, 0x77, 0x61, 0xac, 0x29, 0x78, 0x97, 0x7a, 0x44, 0x95, 0x7e, 0x94, 0x0b, 0x4b, 0xef, 0x16, + 0xfb, 0x4b, 0x28, 0x7c, 0x4e, 0x9b, 0x12, 0xc2, 0x5d, 0xea, 0x49, 0xf2, 0x2d, 0x48, 0xc8, 0x0f, + 0x45, 0x68, 0xfe, 0xc5, 0xce, 0x4f, 0x48, 0x5c, 0xcd, 0xa5, 0xd3, 0xde, 0x97, 0xb4, 0x14, 0x2a, + 0x9b, 0xd7, 0x66, 0x7c, 0x4f, 0xa4, 0xb7, 0x22, 0xca, 0xf4, 0xbd, 0x07, 0x71, 0xf1, 0xf3, 0x33, + 0xaa, 0x6a, 0x86, 0x3a, 0xfc, 0x9b, 0x74, 0x57, 0x67, 0x92, 0x28, 0x7f, 0x46, 0x9b, 0xf4, 0xe5, + 0x57, 0x39, 0x1f, 0x13, 0x7d, 0xa7, 0xff, 0x03, 0x39, 0x83, 0xe2, 0x26, 0xb4, 0x18, 0x13, 0x87, + 0x25, 0x90, 0x09, 0x2a, 0x3e, 0xdb, 0x21, 0x7d, 0x09, 0x85, 0x2e, 0x68, 0xf3, 0x4c, 0x68, 0x81, + 0x51, 0x51, 0x63, 0x85, 0xbf, 0xe2, 0x8b, 0x1b, 0x81, 0x29, 0xd9, 0xee, 0xff, 0x20, 0x5f, 0x44, + 0xc1, 0xb3, 0xc9, 0x44, 0xd3, 0xda, 0x95, 0x1f, 0xb0, 0x66, 0xe3, 0x43, 0x61, 0xf4, 0xb3, 0x9c, + 0x71, 0x61, 0x74, 0x32, 0x64, 0x74, 0x0d, 0x69, 0x24, 0xa3, 0xbf, 0xf5, 0x8c, 0x75, 0x40, 0x45, + 0x2d, 0x64, 0xb9, 0xcd, 0x03, 0x72, 0xbb, 0xaf, 0xfa, 0x20, 0xe4, 0x90, 0x76, 0x39, 0xc6, 0x73, + 0xaa, 0x1b, 0x22, 0xd1, 0x08, 0x91, 0xe3, 0xc1, 0x03, 0xf1, 0x9a, 0x42, 0x6e, 0xc2, 0xf0, 0x5d, + 0xfc, 0x07, 0x4a, 0xd2, 0xc5, 0xd3, 0x24, 0x3f, 0xa7, 0x9c, 0x68, 0xfd, 0x80, 0x16, 0x8f, 0x9a, + 0xb7, 0xfd, 0xfb, 0x9f, 0xff, 0x6d, 0x21, 0xf2, 0xc3, 0xc7, 0x0b, 0xca, 0xa7, 0x8f, 0x17, 0x94, + 0xcf, 0x1e, 0x2f, 0x28, 0x7f, 0x7d, 0xbc, 0xa0, 0x7c, 0xf4, 0x64, 0x21, 0xf2, 0xd9, 0x93, 0x85, + 0xc8, 0xe7, 0x4f, 0x16, 0x22, 0xdf, 0xfe, 0x3f, 0xe9, 0x7f, 0x3a, 0x75, 0xa7, 0xa2, 0x1b, 0x7a, + 0xd5, 0xb1, 0x0f, 0x69, 0xd1, 0x13, 0xab, 0x15, 0xf1, 0x4f, 0x9c, 0x9f, 0x0c, 0xcc, 0xac, 0x21, + 0xb0, 0xc3, 0xb7, 0xd3, 0x9b, 0x76, 0x7a, 0xad, 0x6a, 0x16, 0x86, 0xd1, 0x96, 0xd7, 0xff, 0x1b, + 0x00, 0x00, 0xff, 0xff, 0x52, 0xad, 0xfa, 0xb1, 0xe1, 0x2a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2283,6 +2278,322 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 +// QueueServiceClient is the client API for QueueService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueueServiceClient interface { + CreateQueue(ctx context.Context, in *Queue, opts ...grpc.CallOption) (*types.Empty, error) + CreateQueues(ctx context.Context, in *QueueList, opts ...grpc.CallOption) (*BatchQueueCreateResponse, error) + UpdateQueue(ctx context.Context, in *Queue, opts ...grpc.CallOption) (*types.Empty, error) + UpdateQueues(ctx context.Context, in *QueueList, opts ...grpc.CallOption) (*BatchQueueUpdateResponse, error) + DeleteQueue(ctx context.Context, in *QueueDeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) + GetQueue(ctx context.Context, in *QueueGetRequest, opts ...grpc.CallOption) (*Queue, error) + GetQueues(ctx context.Context, in *StreamingQueueGetRequest, opts ...grpc.CallOption) (QueueService_GetQueuesClient, error) +} + +type queueServiceClient struct { + cc *grpc.ClientConn +} + +func NewQueueServiceClient(cc *grpc.ClientConn) QueueServiceClient { + return &queueServiceClient{cc} +} + +func (c *queueServiceClient) CreateQueue(ctx context.Context, in *Queue, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/api.QueueService/CreateQueue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queueServiceClient) CreateQueues(ctx context.Context, in *QueueList, opts ...grpc.CallOption) (*BatchQueueCreateResponse, error) { + out := new(BatchQueueCreateResponse) + err := c.cc.Invoke(ctx, "/api.QueueService/CreateQueues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queueServiceClient) UpdateQueue(ctx context.Context, in *Queue, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/api.QueueService/UpdateQueue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queueServiceClient) UpdateQueues(ctx context.Context, in *QueueList, opts ...grpc.CallOption) (*BatchQueueUpdateResponse, error) { + out := new(BatchQueueUpdateResponse) + err := c.cc.Invoke(ctx, "/api.QueueService/UpdateQueues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queueServiceClient) DeleteQueue(ctx context.Context, in *QueueDeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/api.QueueService/DeleteQueue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queueServiceClient) GetQueue(ctx context.Context, in *QueueGetRequest, opts ...grpc.CallOption) (*Queue, error) { + out := new(Queue) + err := c.cc.Invoke(ctx, "/api.QueueService/GetQueue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queueServiceClient) GetQueues(ctx context.Context, in *StreamingQueueGetRequest, opts ...grpc.CallOption) (QueueService_GetQueuesClient, error) { + stream, err := c.cc.NewStream(ctx, &_QueueService_serviceDesc.Streams[0], "/api.QueueService/GetQueues", opts...) 
+ if err != nil { + return nil, err + } + x := &queueServiceGetQueuesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type QueueService_GetQueuesClient interface { + Recv() (*StreamingQueueMessage, error) + grpc.ClientStream +} + +type queueServiceGetQueuesClient struct { + grpc.ClientStream +} + +func (x *queueServiceGetQueuesClient) Recv() (*StreamingQueueMessage, error) { + m := new(StreamingQueueMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// QueueServiceServer is the server API for QueueService service. +type QueueServiceServer interface { + CreateQueue(context.Context, *Queue) (*types.Empty, error) + CreateQueues(context.Context, *QueueList) (*BatchQueueCreateResponse, error) + UpdateQueue(context.Context, *Queue) (*types.Empty, error) + UpdateQueues(context.Context, *QueueList) (*BatchQueueUpdateResponse, error) + DeleteQueue(context.Context, *QueueDeleteRequest) (*types.Empty, error) + GetQueue(context.Context, *QueueGetRequest) (*Queue, error) + GetQueues(*StreamingQueueGetRequest, QueueService_GetQueuesServer) error +} + +// UnimplementedQueueServiceServer can be embedded to have forward compatible implementations. +type UnimplementedQueueServiceServer struct { +} + +func (*UnimplementedQueueServiceServer) CreateQueue(ctx context.Context, req *Queue) (*types.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateQueue not implemented") +} +func (*UnimplementedQueueServiceServer) CreateQueues(ctx context.Context, req *QueueList) (*BatchQueueCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateQueues not implemented") +} +func (*UnimplementedQueueServiceServer) UpdateQueue(ctx context.Context, req *Queue) (*types.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateQueue not implemented") +} +func (*UnimplementedQueueServiceServer) UpdateQueues(ctx context.Context, req *QueueList) (*BatchQueueUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateQueues not implemented") +} +func (*UnimplementedQueueServiceServer) DeleteQueue(ctx context.Context, req *QueueDeleteRequest) (*types.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteQueue not implemented") +} +func (*UnimplementedQueueServiceServer) GetQueue(ctx context.Context, req *QueueGetRequest) (*Queue, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetQueue not implemented") +} +func (*UnimplementedQueueServiceServer) GetQueues(req *StreamingQueueGetRequest, srv QueueService_GetQueuesServer) error { + return status.Errorf(codes.Unimplemented, "method GetQueues not implemented") +} + +func RegisterQueueServiceServer(s *grpc.Server, srv QueueServiceServer) { + s.RegisterService(&_QueueService_serviceDesc, srv) +} + +func _QueueService_CreateQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Queue) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueueServiceServer).CreateQueue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.QueueService/CreateQueue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueueServiceServer).CreateQueue(ctx, 
req.(*Queue)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueueService_CreateQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueList) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueueServiceServer).CreateQueues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.QueueService/CreateQueues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueueServiceServer).CreateQueues(ctx, req.(*QueueList)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueueService_UpdateQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Queue) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueueServiceServer).UpdateQueue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.QueueService/UpdateQueue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueueServiceServer).UpdateQueue(ctx, req.(*Queue)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueueService_UpdateQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueList) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueueServiceServer).UpdateQueues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.QueueService/UpdateQueues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueueServiceServer).UpdateQueues(ctx, req.(*QueueList)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueueService_DeleteQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueueServiceServer).DeleteQueue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.QueueService/DeleteQueue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueueServiceServer).DeleteQueue(ctx, req.(*QueueDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueueService_GetQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueueServiceServer).GetQueue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.QueueService/GetQueue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueueServiceServer).GetQueue(ctx, req.(*QueueGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueueService_GetQueues_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingQueueGetRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueueServiceServer).GetQueues(m, &queueServiceGetQueuesServer{stream}) +} + +type 
QueueService_GetQueuesServer interface { + Send(*StreamingQueueMessage) error + grpc.ServerStream +} + +type queueServiceGetQueuesServer struct { + grpc.ServerStream +} + +func (x *queueServiceGetQueuesServer) Send(m *StreamingQueueMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _QueueService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "api.QueueService", + HandlerType: (*QueueServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateQueue", + Handler: _QueueService_CreateQueue_Handler, + }, + { + MethodName: "CreateQueues", + Handler: _QueueService_CreateQueues_Handler, + }, + { + MethodName: "UpdateQueue", + Handler: _QueueService_UpdateQueue_Handler, + }, + { + MethodName: "UpdateQueues", + Handler: _QueueService_UpdateQueues_Handler, + }, + { + MethodName: "DeleteQueue", + Handler: _QueueService_DeleteQueue_Handler, + }, + { + MethodName: "GetQueue", + Handler: _QueueService_GetQueue_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetQueues", + Handler: _QueueService_GetQueues_Handler, + ServerStreams: true, + }, + }, + Metadata: "pkg/api/submit.proto", +} + // SubmitClient is the client API for Submit service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. @@ -2835,11 +3146,6 @@ func (m *JobSubmitRequestItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.QueueTtlSeconds != 0 { - i = encodeVarintSubmit(dAtA, i, uint64(m.QueueTtlSeconds)) - i-- - dAtA[i] = 0x60 - } if len(m.Scheduler) > 0 { i -= len(m.Scheduler) copy(dAtA[i:], m.Scheduler) @@ -4640,9 +4946,6 @@ func (m *JobSubmitRequestItem) Size() (n int) { if l > 0 { n += 1 + l + sovSubmit(uint64(l)) } - if m.QueueTtlSeconds != 0 { - n += 1 + sovSubmit(uint64(m.QueueTtlSeconds)) - } return n } @@ -5406,7 +5709,6 @@ func (this *JobSubmitRequestItem) String() string { `Ingress:` + repeatedStringForIngress + `,`, `Services:` + repeatedStringForServices + `,`, `Scheduler:` + fmt.Sprintf("%v", this.Scheduler) + `,`, - `QueueTtlSeconds:` + fmt.Sprintf("%v", this.QueueTtlSeconds) + `,`, `}`, }, "") return s @@ -6598,25 +6900,6 @@ func (m *JobSubmitRequestItem) Unmarshal(dAtA []byte) error { } m.Scheduler = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) - } - m.QueueTtlSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSubmit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QueueTtlSeconds |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipSubmit(dAtA[iNdEx:]) diff --git a/pkg/api/submit.proto b/pkg/api/submit.proto index 41f6a126bce..3fcd4b068be 100644 --- a/pkg/api/submit.proto +++ b/pkg/api/submit.proto @@ -29,8 +29,7 @@ message JobSubmitRequestItem { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. string scheduler = 11; - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. 
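// ---------------------------------------------------------------------------
// Editor's sketch (not generated code and not part of this patch): how a
// caller might exercise the new QueueService API introduced above, covering a
// unary RPC (CreateQueue) and the server-streaming RPC (GetQueues).
// Assumptions not confirmed by this diff: the import path
// "github.com/armadaproject/armada/pkg/api", a plaintext endpoint on
// localhost:50051, and a Name field on the Queue message.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/armadaproject/armada/pkg/api"
)

func main() {
	// Dial the Armada server; a real deployment would normally use TLS and auth.
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := api.NewQueueServiceClient(conn)
	ctx := context.Background()

	// Unary RPC: create a queue.
	if _, err := client.CreateQueue(ctx, &api.Queue{Name: "test", PriorityFactor: 1}); err != nil {
		log.Fatalf("create queue: %v", err)
	}

	// Server-streaming RPC: receive queues until the server closes the stream.
	stream, err := client.GetQueues(ctx, &api.StreamingQueueGetRequest{})
	if err != nil {
		log.Fatalf("get queues: %v", err)
	}
	for {
		msg, err := stream.Recv()
		if err == io.EOF {
			break // all queues received
		}
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		log.Printf("queue message: %v", msg)
	}
}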
- int64 queue_ttl_seconds = 12; + // Ordinal 12 was previously used for queue_ttl_seconds } message IngressConfig { @@ -190,6 +189,7 @@ message Queue { double priority_factor = 2; repeated string user_owners = 3; repeated string group_owners = 4; + // These are ignored and should be removed map resource_limits = 5 [deprecated = true]; // Map from priority class name to resource limit overrides for this queue and priority class. // If provided for a priority class, global limits for that priority class do not apply to this queue. @@ -269,6 +269,16 @@ message StreamingQueueMessage{ } } +service QueueService { + rpc CreateQueue (Queue) returns (google.protobuf.Empty) {} + rpc CreateQueues (QueueList) returns (BatchQueueCreateResponse) {} + rpc UpdateQueue (Queue) returns (google.protobuf.Empty) {} + rpc UpdateQueues (QueueList) returns (BatchQueueUpdateResponse){} + rpc DeleteQueue (QueueDeleteRequest) returns (google.protobuf.Empty) {} + rpc GetQueue (QueueGetRequest) returns (Queue) {} + rpc GetQueues (StreamingQueueGetRequest) returns (stream StreamingQueueMessage) {} +} + service Submit { rpc SubmitJobs (JobSubmitRequest) returns (JobSubmitResponse) { option (google.api.http) = { diff --git a/pkg/api/util.go b/pkg/api/util.go index a87329d8a98..9b634f35d7b 100644 --- a/pkg/api/util.go +++ b/pkg/api/util.go @@ -2,34 +2,13 @@ package api import ( "fmt" - "math" "strings" - "time" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" - "github.com/armadaproject/armada/internal/common/logging" - armadaresource "github.com/armadaproject/armada/internal/common/resource" - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/interfaces" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -// IsTerminal returns true if the JobState s corresponds to a state -// that indicates the job has been terminated. -func (s JobState) IsTerminal() bool { - switch s { - case JobState_SUCCEEDED: - return true - case JobState_FAILED: - return true - } - return false -} - func NodeIdFromExecutorAndNodeName(executor, nodeName string) string { return fmt.Sprintf("%s-%s", executor, nodeName) } @@ -52,48 +31,6 @@ func JobRunStateFromApiJobState(s JobState) schedulerobjects.JobRunState { return schedulerobjects.JobRunState_UNKNOWN } -func (job *Job) GetPerQueuePriority() uint32 { - priority := job.Priority - if priority < 0 { - return 0 - } - if priority > math.MaxUint32 { - return math.MaxUint32 - } - return uint32(math.Round(priority)) -} - -func (job *Job) GetSubmitTime() time.Time { - return job.Created -} - -func (job *Job) GetPodRequirements(priorityClasses map[string]types.PriorityClass) *schedulerobjects.PodRequirements { - podSpec := job.GetMainPodSpec() - - priority, ok := PriorityFromPodSpec(podSpec, priorityClasses) - if priorityClasses != nil && !ok { - // Ignore this error if priorityByPriorityClassName is explicitly set to nil. - // We assume that in this case the caller is sure the priority does not need to be set. 
- err := errors.Errorf("unknown priorityClassName %s", podSpec.PriorityClassName) - logging.WithStacktrace(logrus.NewEntry(logrus.New()), err).Error("failed to get priority from priorityClassName") - } - - preemptionPolicy := string(v1.PreemptLowerPriority) - if podSpec.PreemptionPolicy != nil { - preemptionPolicy = string(*podSpec.PreemptionPolicy) - } - - return &schedulerobjects.PodRequirements{ - NodeSelector: podSpec.NodeSelector, - Affinity: podSpec.Affinity, - Tolerations: podSpec.Tolerations, - Annotations: maps.Clone(job.Annotations), - Priority: priority, - PreemptionPolicy: preemptionPolicy, - ResourceRequirements: job.GetResourceRequirements(), - } -} - // SchedulingResourceRequirementsFromPodSpec returns resource requests and limits necessary for scheduling a pod. // The requests and limits are set to: // @@ -137,120 +74,6 @@ func SchedulingResourceRequirementsFromPodSpec(podSpec *v1.PodSpec) v1.ResourceR return rv } -// PriorityFromPodSpec returns the priority in a pod spec. -// If priority is set directly, that value is returned. -// Otherwise, it returns the value of the key podSpec. -// In both cases the value along with true boolean is returned. -// PriorityClassName in priorityByPriorityClassName map. -// If no priority is set for the pod spec, 0 along with a false boolean would be returned -func PriorityFromPodSpec(podSpec *v1.PodSpec, priorityClasses map[string]types.PriorityClass) (int32, bool) { - // If there's no podspec there's nothing we can do - if podSpec == nil { - return 0, false - } - - // If a priority is directly specified, use that - if podSpec.Priority != nil { - return *podSpec.Priority, true - } - - // If we find a priority class use that - priorityClass, ok := priorityClasses[podSpec.PriorityClassName] - if ok { - return priorityClass.Priority, true - } - - // Couldn't find anything - return 0, false -} - -func (job *Job) GetPriorityClassName() string { - if podSpec := job.GetMainPodSpec(); podSpec != nil { - return podSpec.PriorityClassName - } - return "" -} - -func (job *Job) GetScheduledAtPriority() (int32, bool) { - return -1, false -} - -func (job *Job) GetNodeSelector() map[string]string { - podSpec := job.GetMainPodSpec() - return podSpec.NodeSelector -} - -func (job *Job) GetAffinity() *v1.Affinity { - podSpec := job.GetMainPodSpec() - return podSpec.Affinity -} - -func (job *Job) GetTolerations() []v1.Toleration { - podSpec := job.GetMainPodSpec() - return podSpec.Tolerations -} - -func (job *Job) GetResourceRequirements() v1.ResourceRequirements { - // Use pre-computed schedulingResourceRequirements if available. - // Otherwise compute it from the containers in podSpec. - podSpec := job.GetMainPodSpec() - if len(job.SchedulingResourceRequirements.Requests) > 0 || len(job.SchedulingResourceRequirements.Limits) > 0 { - return job.SchedulingResourceRequirements - } else { - return SchedulingResourceRequirementsFromPodSpec(podSpec) - } -} - -// GetSchedulingKey returns the scheduling key associated with a job. -// The second return value is always false since scheduling keys are not pre-computed for these jobs. -func (job *Job) GetSchedulingKey() (schedulerobjects.SchedulingKey, bool) { - return schedulerobjects.SchedulingKey{}, false -} - -// SchedulingOrderCompare defines the order in which jobs in a particular queue should be scheduled, -func (job *Job) SchedulingOrderCompare(other interfaces.LegacySchedulerJob) int { - // We need this cast for now to expose this method via an interface. 
- // This is safe since we only ever compare jobs of the same type. - return SchedulingOrderCompare(job, other.(*Job)) -} - -// SchedulingOrderCompare defines the order in which jobs in a queue should be scheduled -// (both when scheduling new jobs and when re-scheduling evicted jobs). -// Specifically, compare returns -// - 0 if the jobs have equal job id, -// - -1 if job should be scheduled before other, -// - +1 if other should be scheduled before other. -func SchedulingOrderCompare(job, other *Job) int { - if job.Id == other.Id { - return 0 - } - - // Jobs with higher in queue-priority come first. - if job.Priority < other.Priority { - return -1 - } else if job.Priority > other.Priority { - return 1 - } - - // Jobs that have been queuing for longer are scheduled first. - if cmp := job.Created.Compare(other.Created); cmp != 0 { - return cmp - } - - // Tie-break by jobId, which must be unique. - // This ensure there is a total order between jobs, i.e., no jobs are equal from an ordering point of view. - if job.Id < other.Id { - return -1 - } else if job.Id > other.Id { - return 1 - } - panic("We should never get here. Since we check for job id equality at the top of this function.") -} - -func (job *Job) GetJobSet() string { - return job.JobSetId -} - func (job *Job) GetMainPodSpec() *v1.PodSpec { if job.PodSpec != nil { return job.PodSpec @@ -272,11 +95,6 @@ func (job *JobSubmitRequestItem) GetMainPodSpec() *v1.PodSpec { return nil } -func (job *Job) TotalResourceRequest() armadaresource.ComputeResources { - podSpec := job.GetMainPodSpec() - return armadaresource.TotalPodResourceRequest(podSpec) -} - func ShortStringFromEventMessages(msgs []*EventMessage) string { var sb strings.Builder sb.WriteString("[") @@ -318,8 +136,6 @@ func JobIdFromApiEvent(msg *EventMessage) string { return e.Submitted.JobId case *EventMessage_Queued: return e.Queued.JobId - case *EventMessage_DuplicateFound: - return e.DuplicateFound.JobId case *EventMessage_Leased: return e.Leased.JobId case *EventMessage_LeaseReturned: @@ -350,8 +166,6 @@ func JobIdFromApiEvent(msg *EventMessage) string { return e.IngressInfo.JobId case *EventMessage_Reprioritizing: return e.Reprioritizing.JobId - case *EventMessage_Updated: - return e.Updated.JobId case *EventMessage_Preempted: return e.Preempted.JobId } @@ -364,8 +178,6 @@ func JobSetIdFromApiEvent(msg *EventMessage) string { return e.Submitted.JobSetId case *EventMessage_Queued: return e.Queued.JobSetId - case *EventMessage_DuplicateFound: - return e.DuplicateFound.JobSetId case *EventMessage_Leased: return e.Leased.JobSetId case *EventMessage_LeaseReturned: @@ -396,8 +208,6 @@ func JobSetIdFromApiEvent(msg *EventMessage) string { return e.IngressInfo.JobSetId case *EventMessage_Reprioritizing: return e.Reprioritizing.JobSetId - case *EventMessage_Updated: - return e.Updated.JobSetId case *EventMessage_Preempted: return e.Preempted.JobSetId } diff --git a/pkg/api/util_test.go b/pkg/api/util_test.go index c23eaab5b70..d3acc84923d 100644 --- a/pkg/api/util_test.go +++ b/pkg/api/util_test.go @@ -6,29 +6,6 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" - - "github.com/armadaproject/armada/internal/common/types" - "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" -) - -const ( - PriorityClassEmpty = "" - PriorityClass0 = "priority-0" - PriorityClass1 = "priority-1" - PriorityClass2 = "priority-2" - PriorityClass3 = "priority-3" -) - -var ( - TestPriorityClasses = 
map[string]types.PriorityClass{ - PriorityClassEmpty: {Priority: 0, Preemptible: true}, - PriorityClass0: {Priority: 0, Preemptible: true}, - PriorityClass1: {Priority: 1, Preemptible: true}, - PriorityClass2: {Priority: 2, Preemptible: true}, - PriorityClass3: {Priority: 3, Preemptible: false}, - } - TestDefaultPriorityClass = PriorityClass3 - TestPriorities = []int32{0, 1, 2, 3} ) func TestSchedulingResourceRequirementsFromPodSpec(t *testing.T) { @@ -263,228 +240,3 @@ func QuantityWithMilliValue(v int64) resource.Quantity { q.SetMilli(v) return q } - -func TestJobGetPodRequirements(t *testing.T) { - tests := map[string]struct { - job *Job - expected *schedulerobjects.PodRequirements - }{ - "queue priority": { - job: &Job{ - Priority: 10, - PodSpec: &v1.PodSpec{ - // Priority: pointerFromValue(int32(10)), - // PriorityClassName: , - }, - }, - expected: &schedulerobjects.PodRequirements{ - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: make(v1.ResourceList), - Limits: make(v1.ResourceList), - }, - }, - }, - "priorityClass priority": { - job: &Job{ - PodSpec: &v1.PodSpec{ - PriorityClassName: PriorityClass1, - }, - }, - expected: &schedulerobjects.PodRequirements{ - Priority: 1, - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: make(v1.ResourceList), - Limits: make(v1.ResourceList), - }, - }, - }, - "preemptionPolicy": { - job: &Job{ - PodSpec: &v1.PodSpec{ - PreemptionPolicy: pointerFromValue(v1.PreemptNever), - }, - }, - expected: &schedulerobjects.PodRequirements{ - PreemptionPolicy: string(v1.PreemptNever), - ResourceRequirements: v1.ResourceRequirements{ - Requests: make(v1.ResourceList), - Limits: make(v1.ResourceList), - }, - }, - }, - "targeting": { - job: &Job{ - PodSpec: &v1.PodSpec{ - NodeSelector: map[string]string{"label": "value"}, - Affinity: &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "affinityKey", - }, - }, - }, - }, - }, - }, - }, - Tolerations: []v1.Toleration{ - { - Key: "tolerationKey", - }, - }, - }, - }, - expected: &schedulerobjects.PodRequirements{ - NodeSelector: map[string]string{"label": "value"}, - Affinity: &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "affinityKey", - }, - }, - }, - }, - }, - }, - }, - Tolerations: []v1.Toleration{ - { - Key: "tolerationKey", - }, - }, - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: make(v1.ResourceList), - Limits: make(v1.ResourceList), - }, - }, - }, - "annotations": { - job: &Job{ - Annotations: map[string]string{"key": "value"}, - PodSpec: &v1.PodSpec{}, - }, - expected: &schedulerobjects.PodRequirements{ - Annotations: map[string]string{"key": "value"}, - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: make(v1.ResourceList), - Limits: make(v1.ResourceList), - }, - }, - }, - "schedulingResourceRequirements": { - job: &Job{ - PodSpec: &v1.PodSpec{ - Containers: []v1.Container{ - { - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{"foo": QuantityWithMilliValue(1000)}, - Limits: v1.ResourceList{"bar": 
QuantityWithMilliValue(2000)}, - }, - }, - }, - }, - }, - expected: &schedulerobjects.PodRequirements{ - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{"foo": QuantityWithMilliValue(1000)}, - Limits: v1.ResourceList{"bar": QuantityWithMilliValue(2000)}, - }, - }, - }, - "schedulingResourceRequirements pre-computed requests": { - job: &Job{ - PodSpec: &v1.PodSpec{}, - SchedulingResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{"foo": resource.MustParse("1")}, - }, - }, - expected: &schedulerobjects.PodRequirements{ - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{"foo": resource.MustParse("1")}, - Limits: nil, - }, - }, - }, - "schedulingResourceRequirements pre-computed limits": { - job: &Job{ - PodSpec: &v1.PodSpec{}, - SchedulingResourceRequirements: v1.ResourceRequirements{ - Limits: v1.ResourceList{"foo": resource.MustParse("1")}, - }, - }, - expected: &schedulerobjects.PodRequirements{ - PreemptionPolicy: string(v1.PreemptLowerPriority), - ResourceRequirements: v1.ResourceRequirements{ - Requests: nil, - Limits: v1.ResourceList{"foo": resource.MustParse("1")}, - }, - }, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.expected, tc.job.GetPodRequirements(TestPriorityClasses)) - }) - } -} - -func TestPriorityFromPodSpec(t *testing.T) { - tests := map[string]struct { - podSpec *v1.PodSpec - expectedPriority int32 - expectedOk bool - }{ - "nil podSpec": { - podSpec: nil, - expectedPriority: 0, - expectedOk: false, - }, - "priority already set": { - podSpec: &v1.PodSpec{ - Priority: pointerFromValue(int32(1)), - PriorityClassName: PriorityClass2, - }, - expectedPriority: 1, - expectedOk: true, - }, - "existing priorityClass": { - podSpec: &v1.PodSpec{ - PriorityClassName: PriorityClass2, - }, - expectedPriority: 2, - expectedOk: true, - }, - "non-existing priorityClass": { - podSpec: &v1.PodSpec{ - PriorityClassName: "does not exist", - }, - expectedPriority: 0, - expectedOk: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - p, ok := PriorityFromPodSpec(tc.podSpec, TestPriorityClasses) - assert.Equal(t, tc.expectedPriority, p) - assert.Equal(t, tc.expectedOk, ok) - }) - } -} - -func pointerFromValue[T any](v T) *T { - return &v -} diff --git a/pkg/armadaevents/events.pb.go b/pkg/armadaevents/events.pb.go index e9418f58ad5..6c8b802d274 100644 --- a/pkg/armadaevents/events.pb.go +++ b/pkg/armadaevents/events.pb.go @@ -197,7 +197,6 @@ type EventSequence_Event struct { // *EventSequence_Event_JobRunRunning // *EventSequence_Event_JobRunSucceeded // *EventSequence_Event_JobRunErrors - // *EventSequence_Event_JobDuplicateDetected // *EventSequence_Event_StandaloneIngressInfo // *EventSequence_Event_ResourceUtilisation // *EventSequence_Event_JobRunPreempted @@ -205,6 +204,8 @@ type EventSequence_Event struct { // *EventSequence_Event_JobRunPreemptionRequested // *EventSequence_Event_JobRequeued // *EventSequence_Event_JobPreemptionRequested + // *EventSequence_Event_JobRunCancelled + // *EventSequence_Event_JobValidated Event isEventSequence_Event_Event `protobuf_oneof:"event"` } @@ -289,9 +290,6 @@ type EventSequence_Event_JobRunSucceeded struct { type EventSequence_Event_JobRunErrors struct { JobRunErrors *JobRunErrors `protobuf:"bytes,14,opt,name=jobRunErrors,proto3,oneof" json:"jobRunErrors,omitempty"` } -type 
EventSequence_Event_JobDuplicateDetected struct { - JobDuplicateDetected *JobDuplicateDetected `protobuf:"bytes,15,opt,name=jobDuplicateDetected,proto3,oneof" json:"jobDuplicateDetected,omitempty"` -} type EventSequence_Event_StandaloneIngressInfo struct { StandaloneIngressInfo *StandaloneIngressInfo `protobuf:"bytes,16,opt,name=standaloneIngressInfo,proto3,oneof" json:"standaloneIngressInfo,omitempty"` } @@ -313,6 +311,12 @@ type EventSequence_Event_JobRequeued struct { type EventSequence_Event_JobPreemptionRequested struct { JobPreemptionRequested *JobPreemptionRequested `protobuf:"bytes,23,opt,name=jobPreemptionRequested,proto3,oneof" json:"jobPreemptionRequested,omitempty"` } +type EventSequence_Event_JobRunCancelled struct { + JobRunCancelled *JobRunCancelled `protobuf:"bytes,24,opt,name=jobRunCancelled,proto3,oneof" json:"jobRunCancelled,omitempty"` +} +type EventSequence_Event_JobValidated struct { + JobValidated *JobValidated `protobuf:"bytes,25,opt,name=jobValidated,proto3,oneof" json:"jobValidated,omitempty"` +} func (*EventSequence_Event_SubmitJob) isEventSequence_Event_Event() {} func (*EventSequence_Event_ReprioritiseJob) isEventSequence_Event_Event() {} @@ -328,7 +332,6 @@ func (*EventSequence_Event_JobRunAssigned) isEventSequence_Event_Event() func (*EventSequence_Event_JobRunRunning) isEventSequence_Event_Event() {} func (*EventSequence_Event_JobRunSucceeded) isEventSequence_Event_Event() {} func (*EventSequence_Event_JobRunErrors) isEventSequence_Event_Event() {} -func (*EventSequence_Event_JobDuplicateDetected) isEventSequence_Event_Event() {} func (*EventSequence_Event_StandaloneIngressInfo) isEventSequence_Event_Event() {} func (*EventSequence_Event_ResourceUtilisation) isEventSequence_Event_Event() {} func (*EventSequence_Event_JobRunPreempted) isEventSequence_Event_Event() {} @@ -336,6 +339,8 @@ func (*EventSequence_Event_PartitionMarker) isEventSequence_Event_Event() func (*EventSequence_Event_JobRunPreemptionRequested) isEventSequence_Event_Event() {} func (*EventSequence_Event_JobRequeued) isEventSequence_Event_Event() {} func (*EventSequence_Event_JobPreemptionRequested) isEventSequence_Event_Event() {} +func (*EventSequence_Event_JobRunCancelled) isEventSequence_Event_Event() {} +func (*EventSequence_Event_JobValidated) isEventSequence_Event_Event() {} func (m *EventSequence_Event) GetEvent() isEventSequence_Event_Event { if m != nil { @@ -449,13 +454,6 @@ func (m *EventSequence_Event) GetJobRunErrors() *JobRunErrors { return nil } -func (m *EventSequence_Event) GetJobDuplicateDetected() *JobDuplicateDetected { - if x, ok := m.GetEvent().(*EventSequence_Event_JobDuplicateDetected); ok { - return x.JobDuplicateDetected - } - return nil -} - func (m *EventSequence_Event) GetStandaloneIngressInfo() *StandaloneIngressInfo { if x, ok := m.GetEvent().(*EventSequence_Event_StandaloneIngressInfo); ok { return x.StandaloneIngressInfo @@ -505,6 +503,20 @@ func (m *EventSequence_Event) GetJobPreemptionRequested() *JobPreemptionRequeste return nil } +func (m *EventSequence_Event) GetJobRunCancelled() *JobRunCancelled { + if x, ok := m.GetEvent().(*EventSequence_Event_JobRunCancelled); ok { + return x.JobRunCancelled + } + return nil +} + +func (m *EventSequence_Event) GetJobValidated() *JobValidated { + if x, ok := m.GetEvent().(*EventSequence_Event_JobValidated); ok { + return x.JobValidated + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
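// ---------------------------------------------------------------------------
// Editor's sketch (not generated code and not part of this patch): the
// EventSequence_Event oneof above gains two members, JobRunCancelled and
// JobValidated, so consumers that type-switch over the Event field need new
// cases. Standalone illustration; the package name and the import path
// "github.com/armadaproject/armada/pkg/armadaevents" are assumptions.
// ---------------------------------------------------------------------------
package eventsdemo

import "github.com/armadaproject/armada/pkg/armadaevents"

func describeEvent(ev *armadaevents.EventSequence_Event) string {
	switch e := ev.Event.(type) {
	case *armadaevents.EventSequence_Event_SubmitJob:
		// Pre-existing member; JobIdStr is the new string id added by this patch.
		return "submitted: " + e.SubmitJob.GetJobIdStr()
	case *armadaevents.EventSequence_Event_JobRunCancelled:
		// New oneof member added by this patch.
		return "job run cancelled"
	case *armadaevents.EventSequence_Event_JobValidated:
		// New oneof member added by this patch.
		return "job validated"
	default:
		// All other members are unchanged by this patch.
		return "other event"
	}
}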
func (*EventSequence_Event) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -522,7 +534,6 @@ func (*EventSequence_Event) XXX_OneofWrappers() []interface{} { (*EventSequence_Event_JobRunRunning)(nil), (*EventSequence_Event_JobRunSucceeded)(nil), (*EventSequence_Event_JobRunErrors)(nil), - (*EventSequence_Event_JobDuplicateDetected)(nil), (*EventSequence_Event_StandaloneIngressInfo)(nil), (*EventSequence_Event_ResourceUtilisation)(nil), (*EventSequence_Event_JobRunPreempted)(nil), @@ -530,6 +541,8 @@ func (*EventSequence_Event) XXX_OneofWrappers() []interface{} { (*EventSequence_Event_JobRunPreemptionRequested)(nil), (*EventSequence_Event_JobRequeued)(nil), (*EventSequence_Event_JobPreemptionRequested)(nil), + (*EventSequence_Event_JobRunCancelled)(nil), + (*EventSequence_Event_JobValidated)(nil), } } @@ -540,6 +553,9 @@ type ResourceUtilisation struct { ResourceInfo *KubernetesResourceInfo `protobuf:"bytes,3,opt,name=resource_info,json=resourceInfo,proto3" json:"resourceInfo,omitempty"` MaxResourcesForPeriod map[string]resource.Quantity `protobuf:"bytes,4,rep,name=max_resources_for_period,json=maxResourcesForPeriod,proto3" json:"maxResourcesForPeriod" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` TotalCumulativeUsage map[string]resource.Quantity `protobuf:"bytes,5,rep,name=total_cumulative_usage,json=totalCumulativeUsage,proto3" json:"totalCumulativeUsage" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // This is a string representation of the job_id. Eventually we will retire the job_id field and just use strings + JobIdStr string `protobuf:"bytes,6,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,7,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *ResourceUtilisation) Reset() { *m = ResourceUtilisation{} } @@ -610,6 +626,20 @@ func (m *ResourceUtilisation) GetTotalCumulativeUsage() map[string]resource.Quan return nil } +func (m *ResourceUtilisation) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *ResourceUtilisation) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // A UUID, encoded in accordance with section 4.1.2 of RFC 4122 // (technically equivalent to ITU-T Rec. X.667 and ISO/IEC 9834-8). // As of March 2022, this seems to be the most efficient way to include UUIDs in proto messages; see @@ -706,8 +736,9 @@ type SubmitJob struct { Scheduler string `protobuf:"bytes,11,opt,name=scheduler,proto3" json:"scheduler,omitempty"` // Indicates whether job is a duplicate IsDuplicate bool `protobuf:"varint,12,opt,name=isDuplicate,proto3" json:"isDuplicate,omitempty"` - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - QueueTtlSeconds int64 `protobuf:"varint,13,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` + // Ordinal 13 was originally used for queue_ttl_seconds + // This is a string representation of the job_id. 
Eventually we will retire the job_id field and just use strings + JobIdStr string `protobuf:"bytes,14,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *SubmitJob) Reset() { *m = SubmitJob{} } @@ -827,11 +858,11 @@ func (m *SubmitJob) GetIsDuplicate() bool { return false } -func (m *SubmitJob) GetQueueTtlSeconds() int64 { +func (m *SubmitJob) GetJobIdStr() string { if m != nil { - return m.QueueTtlSeconds + return m.JobIdStr } - return 0 + return "" } // Kubernetes objects that can serve as main objects for an Armada job. @@ -1187,6 +1218,7 @@ func (m *PodSpecWithAvoidList) GetNodeAvoidlist() []string { type ReprioritiseJob struct { JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` Priority uint32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *ReprioritiseJob) Reset() { *m = ReprioritiseJob{} } @@ -1236,11 +1268,19 @@ func (m *ReprioritiseJob) GetPriority() uint32 { return 0 } +func (m *ReprioritiseJob) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + type JobRequeued struct { JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` SchedulingInfo *schedulerobjects.JobSchedulingInfo `protobuf:"bytes,2,opt,name=scheduling_info,json=schedulingInfo,proto3" json:"schedulingInfo,omitempty"` // Used by the scheduler to maintain a consistent state - UpdateSequenceNumber int32 `protobuf:"varint,3,opt,name=update_sequence_number,json=updateSequenceNumber,proto3" json:"updateSequenceNumber,omitempty"` + UpdateSequenceNumber int32 `protobuf:"varint,3,opt,name=update_sequence_number,json=updateSequenceNumber,proto3" json:"updateSequenceNumber,omitempty"` + JobIdStr string `protobuf:"bytes,4,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *JobRequeued) Reset() { *m = JobRequeued{} } @@ -1297,6 +1337,13 @@ func (m *JobRequeued) GetUpdateSequenceNumber() int32 { return 0 } +func (m *JobRequeued) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + // Set the priority of all jobs part of a job set. // This sets the priority of all jobs in the job set currently in the queued state. type ReprioritiseJobSet struct { @@ -1348,6 +1395,7 @@ func (m *ReprioritiseJobSet) GetPriority() uint32 { type ReprioritisedJob struct { JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` Priority uint32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *ReprioritisedJob) Reset() { *m = ReprioritisedJob{} } @@ -1397,12 +1445,20 @@ func (m *ReprioritisedJob) GetPriority() uint32 { return 0 } +func (m *ReprioritisedJob) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + // A request to cancel a particular job. // This will cancel all runs (preempting it if running) for the job (i.e., move them to the failed state) // and then cancel job itself (i.e., move it to the failed state). 
type CancelJob struct { - JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` - Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *CancelJob) Reset() { *m = CancelJob{} } @@ -1452,6 +1508,13 @@ func (m *CancelJob) GetReason() string { return "" } +func (m *CancelJob) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + // Filter to be used when cancelling job sets // This allows users to cancel all jobs in a given state for a specific job set type JobSetFilter struct { @@ -1555,8 +1618,9 @@ func (m *CancelJobSet) GetReason() string { // Generated by the scheduler in response to CancelJob and CancelJobSet. // One such message is generated per job that was cancelled. type CancelledJob struct { - JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` - Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *CancelledJob) Reset() { *m = CancelledJob{} } @@ -1606,12 +1670,20 @@ func (m *CancelledJob) GetReason() string { return "" } +func (m *CancelledJob) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + type JobSucceeded struct { JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` // Runtime information, e.g., which node the job is running on, its IP address etc, // for each resource created for the job run. // TODO: remove this once we have fixed the external api ResourceInfos []*KubernetesResourceInfo `protobuf:"bytes,2,rep,name=resourceInfos,proto3" json:"resourceInfos,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *JobSucceeded) Reset() { *m = JobSucceeded{} } @@ -1661,6 +1733,13 @@ func (m *JobSucceeded) GetResourceInfos() []*KubernetesResourceInfo { return nil } +func (m *JobSucceeded) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + // Indicates that a job has been leased to a cluster by the Armada scheduler. type JobRunLeased struct { RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` @@ -1685,6 +1764,8 @@ type JobRunLeased struct { // for example, it may add additional tolerations to runs that are scheduled // as away jobs. 
PodRequirementsOverlay *schedulerobjects.PodRequirements `protobuf:"bytes,9,opt,name=pod_requirements_overlay,json=podRequirementsOverlay,proto3" json:"podRequirementsOverlay,omitempty"` + JobIdStr string `protobuf:"bytes,10,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,11,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *JobRunLeased) Reset() { *m = JobRunLeased{} } @@ -1776,6 +1857,20 @@ func (m *JobRunLeased) GetPodRequirementsOverlay() *schedulerobjects.PodRequirem return nil } +func (m *JobRunLeased) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunLeased) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Indicates that a job has been assigned to nodes by Kubernetes. type JobRunAssigned struct { RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` @@ -1784,6 +1879,8 @@ type JobRunAssigned struct { // for each resource created for the job run. // Included here and in JobRunRunning for compatibility with legacy messages. ResourceInfos []*KubernetesResourceInfo `protobuf:"bytes,3,rep,name=resourceInfos,proto3" json:"resourceInfos,omitempty"` + JobIdStr string `protobuf:"bytes,4,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,5,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *JobRunAssigned) Reset() { *m = JobRunAssigned{} } @@ -1840,6 +1937,20 @@ func (m *JobRunAssigned) GetResourceInfos() []*KubernetesResourceInfo { return nil } +func (m *JobRunAssigned) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunAssigned) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Indicates that the resources required by the job have been created and that the job is now running. type JobRunRunning struct { RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` @@ -1847,6 +1958,8 @@ type JobRunRunning struct { // Runtime information, e.g., which node the job is running on, its IP address etc, // for each resource created for the job run. ResourceInfos []*KubernetesResourceInfo `protobuf:"bytes,3,rep,name=resourceInfos,proto3" json:"resourceInfos,omitempty"` + JobIdStr string `protobuf:"bytes,4,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,5,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *JobRunRunning) Reset() { *m = JobRunRunning{} } @@ -1903,6 +2016,20 @@ func (m *JobRunRunning) GetResourceInfos() []*KubernetesResourceInfo { return nil } +func (m *JobRunRunning) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunRunning) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Message containing runtime information about some resource created for a job. 
type KubernetesResourceInfo struct { ObjectMeta *ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` @@ -2119,6 +2246,8 @@ type StandaloneIngressInfo struct { PodNumber int32 `protobuf:"varint,6,opt,name=pod_number,json=podNumber,proto3" json:"podNumber,omitempty"` PodName string `protobuf:"bytes,7,opt,name=pod_name,json=podName,proto3" json:"podName,omitempty"` PodNamespace string `protobuf:"bytes,8,opt,name=pod_namespace,json=podNamespace,proto3" json:"podNamespace,omitempty"` + JobIdStr string `protobuf:"bytes,9,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,10,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *StandaloneIngressInfo) Reset() { *m = StandaloneIngressInfo{} } @@ -2210,6 +2339,20 @@ func (m *StandaloneIngressInfo) GetPodNamespace() string { return "" } +func (m *StandaloneIngressInfo) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *StandaloneIngressInfo) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Indicates that the job finished successfully (i.e., in the expected manner). type JobRunSucceeded struct { RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` @@ -2218,6 +2361,8 @@ type JobRunSucceeded struct { // for each resource created for the job run. // TODO: remove this once we have fixed the external api ResourceInfos []*KubernetesResourceInfo `protobuf:"bytes,3,rep,name=resourceInfos,proto3" json:"resourceInfos,omitempty"` + JobIdStr string `protobuf:"bytes,4,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,5,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *JobRunSucceeded) Reset() { *m = JobRunSucceeded{} } @@ -2274,13 +2419,28 @@ func (m *JobRunSucceeded) GetResourceInfos() []*KubernetesResourceInfo { return nil } +func (m *JobRunSucceeded) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunSucceeded) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Message containing a set of errors associated with a particular job. // Contains a flag, which if set to true indicates that the job has failed. // Otherwise, this message represents a set of errors from which the system has recovered. type JobErrors struct { JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` // A set of errors that occurred within some context. - Errors []*Error `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` + Errors []*Error `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *JobErrors) Reset() { *m = JobErrors{} } @@ -2330,6 +2490,13 @@ func (m *JobErrors) GetErrors() []*Error { return nil } +func (m *JobErrors) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + // Message containing a set of errors associated with a particular job run. // Contains a flag, which if set to true indicates that the job run has failed. // Otherwise, this message represents a set of errors from which the system has recovered. 
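// ---------------------------------------------------------------------------
// Editor's sketch (not generated code and not part of this patch): this patch
// adds parallel JobIdStr/RunIdStr string fields alongside the legacy Uuid
// fields on many event messages, as a migration step towards string job ids.
// A consumer can prefer the new string form and fall back to the legacy Uuid
// only while producers are still being upgraded. The fallback rendering below
// is a placeholder rather than the project's real conversion helper; the
// package name and import path are assumptions.
// ---------------------------------------------------------------------------
package eventsdemo

import (
	"fmt"

	"github.com/armadaproject/armada/pkg/armadaevents"
)

func jobIDFromJobErrors(msg *armadaevents.JobErrors) string {
	// New-style string id, populated by upgraded producers.
	if id := msg.GetJobIdStr(); id != "" {
		return id
	}
	// Legacy fallback: the RFC 4122 Uuid message is still populated during the
	// migration; convert it with whatever helper the project actually provides.
	return fmt.Sprintf("legacy-uuid:%v", msg.GetJobId())
}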
@@ -2341,7 +2508,9 @@ type JobRunErrors struct { RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` JobId *Uuid `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` // A set of errors that occurred within some context. - Errors []*Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` + Errors []*Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` + JobIdStr string `protobuf:"bytes,4,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,5,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *JobRunErrors) Reset() { *m = JobRunErrors{} } @@ -2398,6 +2567,20 @@ func (m *JobRunErrors) GetErrors() []*Error { return nil } +func (m *JobRunErrors) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunErrors) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Represents a failure that took place in the course of a job run (i.e., an attempt to run a job). // Each failure is either a resource failure or a system-level failure. // Resource failures correspond to a problem with a specific Kubernetes resource @@ -2423,6 +2606,7 @@ type Error struct { // *Error_PodTerminated // *Error_JobRunPreemptedError // *Error_GangJobUnschedulable + // *Error_JobRejected Reason isError_Reason `protobuf_oneof:"reason"` } @@ -2498,6 +2682,9 @@ type Error_JobRunPreemptedError struct { type Error_GangJobUnschedulable struct { GangJobUnschedulable *GangJobUnschedulable `protobuf:"bytes,12,opt,name=gangJobUnschedulable,proto3,oneof" json:"gangJobUnschedulable,omitempty"` } +type Error_JobRejected struct { + JobRejected *JobRejected `protobuf:"bytes,13,opt,name=jobRejected,proto3,oneof" json:"jobRejected,omitempty"` +} func (*Error_KubernetesError) isError_Reason() {} func (*Error_ContainerError) isError_Reason() {} @@ -2510,6 +2697,7 @@ func (*Error_PodLeaseReturned) isError_Reason() {} func (*Error_PodTerminated) isError_Reason() {} func (*Error_JobRunPreemptedError) isError_Reason() {} func (*Error_GangJobUnschedulable) isError_Reason() {} +func (*Error_JobRejected) isError_Reason() {} func (m *Error) GetReason() isError_Reason { if m != nil { @@ -2602,6 +2790,13 @@ func (m *Error) GetGangJobUnschedulable() *GangJobUnschedulable { return nil } +func (m *Error) GetJobRejected() *JobRejected { + if x, ok := m.GetReason().(*Error_JobRejected); ok { + return x.JobRejected + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
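// ---------------------------------------------------------------------------
// Editor's sketch (not generated code and not part of this patch): the Error
// oneof gains a JobRejected reason, and PodError/PodLeaseReturned gain a
// DebugMessage field, so error consumers need a new case and can surface the
// extra detail. Standalone illustration; the package name and import path are
// assumptions.
// ---------------------------------------------------------------------------
package eventsdemo

import "github.com/armadaproject/armada/pkg/armadaevents"

func describeError(e *armadaevents.Error) string {
	switch r := e.Reason.(type) {
	case *armadaevents.Error_JobRejected:
		// New reason added by this patch: the job was rejected outright.
		return "rejected: " + r.JobRejected.GetMessage()
	case *armadaevents.Error_PodLeaseReturned:
		// Existing reason; DebugMessage is the new lower-level detail field.
		return r.PodLeaseReturned.GetMessage() + " (" + r.PodLeaseReturned.GetDebugMessage() + ")"
	default:
		// All other reasons are unchanged by this patch.
		return "other error"
	}
}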
func (*Error) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -2616,6 +2811,7 @@ func (*Error) XXX_OneofWrappers() []interface{} { (*Error_PodTerminated)(nil), (*Error_JobRunPreemptedError)(nil), (*Error_GangJobUnschedulable)(nil), + (*Error_JobRejected)(nil), } } @@ -2683,6 +2879,7 @@ type PodError struct { PodNumber int32 `protobuf:"varint,4,opt,name=pod_number,json=podNumber,proto3" json:"podNumber,omitempty"` ContainerErrors []*ContainerError `protobuf:"bytes,5,rep,name=containerErrors,proto3" json:"containerErrors,omitempty"` KubernetesReason KubernetesReason `protobuf:"varint,6,opt,name=kubernetes_reason,json=kubernetesReason,proto3,enum=armadaevents.KubernetesReason" json:"kubernetesReason,omitempty"` + DebugMessage string `protobuf:"bytes,7,opt,name=debugMessage,proto3" json:"debugMessage,omitempty"` } func (m *PodError) Reset() { *m = PodError{} } @@ -2760,6 +2957,13 @@ func (m *PodError) GetKubernetesReason() KubernetesReason { return KubernetesReason_AppError } +func (m *PodError) GetDebugMessage() string { + if m != nil { + return m.DebugMessage + } + return "" +} + type ContainerError struct { // this ObjectMeta identifies the container ObjectMeta *ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` @@ -2847,6 +3051,7 @@ type PodLeaseReturned struct { Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` PodNumber int32 `protobuf:"varint,3,opt,name=pod_number,json=podNumber,proto3" json:"podNumber,omitempty"` RunAttempted bool `protobuf:"varint,4,opt,name=run_attempted,json=runAttempted,proto3" json:"runAttempted,omitempty"` + DebugMessage string `protobuf:"bytes,5,opt,name=debugMessage,proto3" json:"debugMessage,omitempty"` } func (m *PodLeaseReturned) Reset() { *m = PodLeaseReturned{} } @@ -2910,6 +3115,13 @@ func (m *PodLeaseReturned) GetRunAttempted() bool { return false } +func (m *PodLeaseReturned) GetDebugMessage() string { + if m != nil { + return m.DebugMessage + } + return "" +} + // Indicates that the lease on the job that the pod was part of could not be renewed. // If this happens, the executor deletes the pod and generates a JobRunError with this message as the reason. type PodTerminated struct { @@ -3246,25 +3458,22 @@ func (m *GangJobUnschedulable) GetMessage() string { return "" } -// Generated by the scheduler whenever it detects a SubmitJob message that includes a previously used deduplication id -// (i.e., when it detects a duplicate job submission). 
-type JobDuplicateDetected struct { - NewJobId *Uuid `protobuf:"bytes,1,opt,name=new_job_id,json=newJobId,proto3" json:"newJobId,omitempty"` - OldJobId *Uuid `protobuf:"bytes,2,opt,name=old_job_id,json=oldJobId,proto3" json:"oldJobId,omitempty"` +type JobRejected struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } -func (m *JobDuplicateDetected) Reset() { *m = JobDuplicateDetected{} } -func (m *JobDuplicateDetected) String() string { return proto.CompactTextString(m) } -func (*JobDuplicateDetected) ProtoMessage() {} -func (*JobDuplicateDetected) Descriptor() ([]byte, []int) { +func (m *JobRejected) Reset() { *m = JobRejected{} } +func (m *JobRejected) String() string { return proto.CompactTextString(m) } +func (*JobRejected) ProtoMessage() {} +func (*JobRejected) Descriptor() ([]byte, []int) { return fileDescriptor_6aab92ca59e015f8, []int{39} } -func (m *JobDuplicateDetected) XXX_Unmarshal(b []byte) error { +func (m *JobRejected) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *JobDuplicateDetected) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *JobRejected) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_JobDuplicateDetected.Marshal(b, m, deterministic) + return xxx_messageInfo_JobRejected.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -3274,30 +3483,23 @@ func (m *JobDuplicateDetected) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *JobDuplicateDetected) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobDuplicateDetected.Merge(m, src) +func (m *JobRejected) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobRejected.Merge(m, src) } -func (m *JobDuplicateDetected) XXX_Size() int { +func (m *JobRejected) XXX_Size() int { return m.Size() } -func (m *JobDuplicateDetected) XXX_DiscardUnknown() { - xxx_messageInfo_JobDuplicateDetected.DiscardUnknown(m) +func (m *JobRejected) XXX_DiscardUnknown() { + xxx_messageInfo_JobRejected.DiscardUnknown(m) } -var xxx_messageInfo_JobDuplicateDetected proto.InternalMessageInfo - -func (m *JobDuplicateDetected) GetNewJobId() *Uuid { - if m != nil { - return m.NewJobId - } - return nil -} +var xxx_messageInfo_JobRejected proto.InternalMessageInfo -func (m *JobDuplicateDetected) GetOldJobId() *Uuid { +func (m *JobRejected) GetMessage() string { if m != nil { - return m.OldJobId + return m.Message } - return nil + return "" } // Message to indicate that a JobRun has been preempted. @@ -3309,7 +3511,9 @@ type JobRunPreempted struct { // Uuid of the job that caused the preemption. PreemptiveJobId *Uuid `protobuf:"bytes,3,opt,name=preemptive_job_id,json=preemptiveJobId,proto3" json:"preemptiveJobId,omitempty"` // Uuid of the job run that caused the preemption. 
- PreemptiveRunId *Uuid `protobuf:"bytes,4,opt,name=preemptive_run_id,json=preemptiveRunId,proto3" json:"preemptiveRunId,omitempty"` + PreemptiveRunId *Uuid `protobuf:"bytes,4,opt,name=preemptive_run_id,json=preemptiveRunId,proto3" json:"preemptiveRunId,omitempty"` + PreemptedJobIdStr string `protobuf:"bytes,5,opt,name=preempted_job_id_str,json=preemptedJobIdStr,proto3" json:"preemptedJobIdStr,omitempty"` + PreemptedRunIdStr string `protobuf:"bytes,6,opt,name=preempted_run_id_str,json=preemptedRunIdStr,proto3" json:"preemptedRunIdStr,omitempty"` } func (m *JobRunPreempted) Reset() { *m = JobRunPreempted{} } @@ -3373,6 +3577,20 @@ func (m *JobRunPreempted) GetPreemptiveRunId() *Uuid { return nil } +func (m *JobRunPreempted) GetPreemptedJobIdStr() string { + if m != nil { + return m.PreemptedJobIdStr + } + return "" +} + +func (m *JobRunPreempted) GetPreemptedRunIdStr() string { + if m != nil { + return m.PreemptedRunIdStr + } + return "" +} + // Message used internally by Armada to see if messages can be propagated through a pulsar partition type PartitionMarker struct { // group id ties together multiple messages across different partitions @@ -3430,8 +3648,10 @@ func (m *PartitionMarker) GetPartition() uint32 { // Indicates that the scheduler has requested for the job run to be pre-empted. type JobRunPreemptionRequested struct { - RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` - JobId *Uuid `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` + JobId *Uuid `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,4,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` } func (m *JobRunPreemptionRequested) Reset() { *m = JobRunPreemptionRequested{} } @@ -3481,9 +3701,24 @@ func (m *JobRunPreemptionRequested) GetJobId() *Uuid { return nil } +func (m *JobRunPreemptionRequested) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunPreemptionRequested) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + // Indicates that a user has requested for the job to be pre-empted. 
type JobPreemptionRequested struct { - JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + JobIdStr string `protobuf:"bytes,2,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } func (m *JobPreemptionRequested) Reset() { *m = JobPreemptionRequested{} } @@ -3526,309 +3761,461 @@ func (m *JobPreemptionRequested) GetJobId() *Uuid { return nil } -func init() { - proto.RegisterEnum("armadaevents.JobState", JobState_name, JobState_value) - proto.RegisterEnum("armadaevents.KubernetesReason", KubernetesReason_name, KubernetesReason_value) - proto.RegisterType((*EventSequence)(nil), "armadaevents.EventSequence") - proto.RegisterType((*EventSequence_Event)(nil), "armadaevents.EventSequence.Event") - proto.RegisterType((*ResourceUtilisation)(nil), "armadaevents.ResourceUtilisation") - proto.RegisterMapType((map[string]resource.Quantity)(nil), "armadaevents.ResourceUtilisation.MaxResourcesForPeriodEntry") - proto.RegisterMapType((map[string]resource.Quantity)(nil), "armadaevents.ResourceUtilisation.TotalCumulativeUsageEntry") - proto.RegisterType((*Uuid)(nil), "armadaevents.Uuid") - proto.RegisterType((*SubmitJob)(nil), "armadaevents.SubmitJob") - proto.RegisterType((*KubernetesMainObject)(nil), "armadaevents.KubernetesMainObject") - proto.RegisterType((*KubernetesObject)(nil), "armadaevents.KubernetesObject") - proto.RegisterType((*ObjectMeta)(nil), "armadaevents.ObjectMeta") - proto.RegisterMapType((map[string]string)(nil), "armadaevents.ObjectMeta.AnnotationsEntry") - proto.RegisterMapType((map[string]string)(nil), "armadaevents.ObjectMeta.LabelsEntry") - proto.RegisterType((*PodSpecWithAvoidList)(nil), "armadaevents.PodSpecWithAvoidList") - proto.RegisterType((*ReprioritiseJob)(nil), "armadaevents.ReprioritiseJob") - proto.RegisterType((*JobRequeued)(nil), "armadaevents.JobRequeued") - proto.RegisterType((*ReprioritiseJobSet)(nil), "armadaevents.ReprioritiseJobSet") - proto.RegisterType((*ReprioritisedJob)(nil), "armadaevents.ReprioritisedJob") - proto.RegisterType((*CancelJob)(nil), "armadaevents.CancelJob") - proto.RegisterType((*JobSetFilter)(nil), "armadaevents.JobSetFilter") - proto.RegisterType((*CancelJobSet)(nil), "armadaevents.CancelJobSet") - proto.RegisterType((*CancelledJob)(nil), "armadaevents.CancelledJob") - proto.RegisterType((*JobSucceeded)(nil), "armadaevents.JobSucceeded") - proto.RegisterType((*JobRunLeased)(nil), "armadaevents.JobRunLeased") - proto.RegisterType((*JobRunAssigned)(nil), "armadaevents.JobRunAssigned") - proto.RegisterType((*JobRunRunning)(nil), "armadaevents.JobRunRunning") - proto.RegisterType((*KubernetesResourceInfo)(nil), "armadaevents.KubernetesResourceInfo") - proto.RegisterType((*PodInfo)(nil), "armadaevents.PodInfo") - proto.RegisterType((*IngressInfo)(nil), "armadaevents.IngressInfo") - proto.RegisterMapType((map[int32]string)(nil), "armadaevents.IngressInfo.IngressAddressesEntry") - proto.RegisterType((*StandaloneIngressInfo)(nil), "armadaevents.StandaloneIngressInfo") - proto.RegisterMapType((map[int32]string)(nil), "armadaevents.StandaloneIngressInfo.IngressAddressesEntry") - proto.RegisterType((*JobRunSucceeded)(nil), "armadaevents.JobRunSucceeded") - proto.RegisterType((*JobErrors)(nil), "armadaevents.JobErrors") - proto.RegisterType((*JobRunErrors)(nil), "armadaevents.JobRunErrors") - proto.RegisterType((*Error)(nil), "armadaevents.Error") - proto.RegisterType((*KubernetesError)(nil), 
"armadaevents.KubernetesError") - proto.RegisterType((*PodError)(nil), "armadaevents.PodError") - proto.RegisterType((*ContainerError)(nil), "armadaevents.ContainerError") - proto.RegisterType((*PodLeaseReturned)(nil), "armadaevents.PodLeaseReturned") - proto.RegisterType((*PodTerminated)(nil), "armadaevents.PodTerminated") - proto.RegisterType((*ExecutorError)(nil), "armadaevents.ExecutorError") - proto.RegisterType((*PodUnschedulable)(nil), "armadaevents.PodUnschedulable") - proto.RegisterType((*LeaseExpired)(nil), "armadaevents.LeaseExpired") - proto.RegisterType((*MaxRunsExceeded)(nil), "armadaevents.MaxRunsExceeded") - proto.RegisterType((*JobRunPreemptedError)(nil), "armadaevents.JobRunPreemptedError") - proto.RegisterType((*GangJobUnschedulable)(nil), "armadaevents.GangJobUnschedulable") - proto.RegisterType((*JobDuplicateDetected)(nil), "armadaevents.JobDuplicateDetected") - proto.RegisterType((*JobRunPreempted)(nil), "armadaevents.JobRunPreempted") - proto.RegisterType((*PartitionMarker)(nil), "armadaevents.PartitionMarker") - proto.RegisterType((*JobRunPreemptionRequested)(nil), "armadaevents.JobRunPreemptionRequested") - proto.RegisterType((*JobPreemptionRequested)(nil), "armadaevents.JobPreemptionRequested") +func (m *JobPreemptionRequested) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" } -func init() { proto.RegisterFile("pkg/armadaevents/events.proto", fileDescriptor_6aab92ca59e015f8) } - -var fileDescriptor_6aab92ca59e015f8 = []byte{ - // 3635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5b, 0x49, 0x6c, 0x1b, 0xd7, - 0xf9, 0xf7, 0x90, 0x12, 0x97, 0x8f, 0x5a, 0xe8, 0x67, 0x49, 0xa6, 0x15, 0x5b, 0x94, 0xc7, 0xf9, - 0xff, 0xe3, 0x04, 0x09, 0x95, 0x38, 0x0b, 0xb2, 0x14, 0x09, 0x44, 0x5b, 0xb1, 0xad, 0x58, 0xb6, - 0x42, 0x59, 0xa9, 0x1b, 0xa4, 0x60, 0x86, 0x9c, 0x27, 0x6a, 0x2c, 0x72, 0x86, 0x99, 0x45, 0x96, - 0x80, 0x1c, 0x9a, 0x22, 0x4d, 0x6f, 0xa9, 0x81, 0x06, 0x68, 0x81, 0x1e, 0xd2, 0x6b, 0x03, 0xf4, - 0xdc, 0x73, 0x4f, 0xcd, 0xa1, 0x28, 0xd2, 0x5b, 0x4f, 0x6c, 0x91, 0xa0, 0x17, 0x1e, 0x8a, 0x1e, - 0xdb, 0x5e, 0x5a, 0xbc, 0x6d, 0xe6, 0xbd, 0x99, 0xa1, 0x2d, 0x6f, 0x75, 0x0a, 0x9f, 0xa4, 0xf9, - 0x7d, 0xeb, 0xdb, 0xbe, 0xf9, 0xbe, 0x6f, 0x1e, 0xe1, 0x44, 0x7f, 0xa7, 0xb3, 0x64, 0xb8, 0x3d, - 0xc3, 0x34, 0xf0, 0x2e, 0xb6, 0x7d, 0x6f, 0x89, 0xfd, 0xa9, 0xf5, 0x5d, 0xc7, 0x77, 0xd0, 0x84, - 0x4c, 0x9a, 0xd7, 0x77, 0x5e, 0xf6, 0x6a, 0x96, 0xb3, 0x64, 0xf4, 0xad, 0xa5, 0xb6, 0xe3, 0xe2, - 0xa5, 0xdd, 0xe7, 0x96, 0x3a, 0xd8, 0xc6, 0xae, 0xe1, 0x63, 0x93, 0x49, 0xcc, 0x9f, 0x96, 0x78, - 0x6c, 0xec, 0xdf, 0x70, 0xdc, 0x1d, 0xcb, 0xee, 0xa4, 0x71, 0x56, 0x3b, 0x8e, 0xd3, 0xe9, 0xe2, - 0x25, 0xfa, 0xd4, 0x0a, 0xb6, 0x96, 0x7c, 0xab, 0x87, 0x3d, 0xdf, 0xe8, 0xf5, 0x39, 0xc3, 0x0b, - 0x91, 0xaa, 0x9e, 0xd1, 0xde, 0xb6, 0x6c, 0xec, 0xee, 0x2f, 0x51, 0x7f, 0xfb, 0xd6, 0x92, 0x8b, - 0x3d, 0x27, 0x70, 0xdb, 0x38, 0xa1, 0xf6, 0x99, 0x8e, 0xe5, 0x6f, 0x07, 0xad, 0x5a, 0xdb, 0xe9, - 0x2d, 0x75, 0x9c, 0x8e, 0x13, 0xe9, 0x27, 0x4f, 0xf4, 0x81, 0xfe, 0xc7, 0xd9, 0x5f, 0xb5, 0x6c, - 0x1f, 0xbb, 0xb6, 0xd1, 0x5d, 0xf2, 0xda, 0xdb, 0xd8, 0x0c, 0xba, 0xd8, 0x8d, 0xfe, 0x73, 0x5a, - 0xd7, 0x71, 0xdb, 0xf7, 0x12, 0x00, 0x93, 0xd5, 0x7f, 0x36, 0x0b, 0x93, 0x2b, 0x64, 0x6a, 0x36, - 0xf0, 0x07, 0x01, 0xb6, 0xdb, 0x18, 0x3d, 0x09, 0xe3, 0x1f, 0x04, 0x38, 0xc0, 0x15, 0x6d, 0x51, - 0x3b, 0x5d, 0xac, 0x1f, 0x19, 0x0e, 0xaa, 0xd3, 0x14, 0x78, 0xda, 0xe9, 0x59, 0x3e, 0xee, 0xf5, - 0xfd, 0xfd, 0x06, 0xe3, 0x40, 0xaf, 0xc2, 
0xc4, 0x75, 0xa7, 0xd5, 0xf4, 0xb0, 0xdf, 0xb4, 0x8d, - 0x1e, 0xae, 0x64, 0xa8, 0x44, 0x65, 0x38, 0xa8, 0xce, 0x5c, 0x77, 0x5a, 0x1b, 0xd8, 0xbf, 0x6c, - 0xf4, 0x64, 0x31, 0x88, 0x50, 0xf4, 0x0c, 0xe4, 0x03, 0x0f, 0xbb, 0x4d, 0xcb, 0xac, 0x64, 0xa9, - 0xd8, 0xcc, 0x70, 0x50, 0x2d, 0x13, 0xe8, 0xa2, 0x29, 0x89, 0xe4, 0x18, 0x82, 0x9e, 0x86, 0x5c, - 0xc7, 0x75, 0x82, 0xbe, 0x57, 0x19, 0x5b, 0xcc, 0x0a, 0x6e, 0x86, 0xc8, 0xdc, 0x0c, 0x41, 0x57, - 0x20, 0xc7, 0xd6, 0xbb, 0x32, 0xbe, 0x98, 0x3d, 0x5d, 0x3a, 0x73, 0xb2, 0x26, 0x6f, 0x82, 0x9a, - 0x32, 0x60, 0xf6, 0xc4, 0x14, 0x32, 0xba, 0xac, 0x90, 0x6f, 0x9b, 0xbf, 0x23, 0x18, 0xa7, 0x7c, - 0xe8, 0x0a, 0xe4, 0xdb, 0x2e, 0x26, 0x8b, 0x55, 0x41, 0x8b, 0xda, 0xe9, 0xd2, 0x99, 0xf9, 0x1a, - 0xdb, 0x04, 0x35, 0xb1, 0x48, 0xb5, 0xab, 0x62, 0x13, 0xd4, 0x8f, 0x0d, 0x07, 0xd5, 0xc3, 0x9c, - 0x3d, 0xd2, 0x7a, 0xf3, 0xcf, 0x55, 0xad, 0x21, 0xb4, 0xa0, 0x75, 0x28, 0x7a, 0x41, 0xab, 0x67, - 0xf9, 0xab, 0x4e, 0x8b, 0xce, 0x79, 0xe9, 0xcc, 0x51, 0xd5, 0xdd, 0x0d, 0x41, 0xae, 0x1f, 0x1d, - 0x0e, 0xaa, 0x47, 0x42, 0xee, 0x48, 0xe3, 0x85, 0x43, 0x8d, 0x48, 0x09, 0xda, 0x86, 0x69, 0x17, - 0xf7, 0x5d, 0xcb, 0x71, 0x2d, 0xdf, 0xf2, 0x30, 0xd1, 0x9b, 0xa1, 0x7a, 0x4f, 0xa8, 0x7a, 0x1b, - 0x2a, 0x53, 0xfd, 0xc4, 0x70, 0x50, 0x3d, 0x16, 0x93, 0x54, 0x6c, 0xc4, 0xd5, 0x22, 0x1f, 0x50, - 0x0c, 0xda, 0xc0, 0x3e, 0x5d, 0xcf, 0xd2, 0x99, 0xc5, 0x5b, 0x1a, 0xdb, 0xc0, 0x7e, 0x7d, 0x71, - 0x38, 0xa8, 0x1e, 0x4f, 0xca, 0x2b, 0x26, 0x53, 0xf4, 0xa3, 0x2e, 0x94, 0x65, 0xd4, 0x24, 0x03, - 0x1c, 0xa3, 0x36, 0x17, 0x46, 0xdb, 0x24, 0x5c, 0xf5, 0x85, 0xe1, 0xa0, 0x3a, 0x1f, 0x97, 0x55, - 0xec, 0x25, 0x34, 0x93, 0xf5, 0x69, 0x1b, 0x76, 0x1b, 0x77, 0x89, 0x99, 0xf1, 0xb4, 0xf5, 0x39, - 0x2b, 0xc8, 0x6c, 0x7d, 0x42, 0x6e, 0x75, 0x7d, 0x42, 0x18, 0xbd, 0x07, 0x13, 0xe1, 0x03, 0x99, - 0xaf, 0x1c, 0xdf, 0x47, 0xe9, 0x4a, 0xc9, 0x4c, 0xcd, 0x0f, 0x07, 0xd5, 0x39, 0x59, 0x46, 0x51, - 0xad, 0x68, 0x8b, 0xb4, 0x77, 0xd9, 0xcc, 0xe4, 0x47, 0x6b, 0x67, 0x1c, 0xb2, 0xf6, 0x6e, 0x72, - 0x46, 0x14, 0x6d, 0x44, 0x3b, 0x39, 0xc4, 0x41, 0xbb, 0x8d, 0xb1, 0x89, 0xcd, 0x4a, 0x21, 0x4d, - 0xfb, 0xaa, 0xc4, 0xc1, 0xb4, 0xcb, 0x32, 0xaa, 0x76, 0x99, 0x42, 0xe6, 0xfa, 0xba, 0xd3, 0x5a, - 0x71, 0x5d, 0xc7, 0xf5, 0x2a, 0xc5, 0xb4, 0xb9, 0x5e, 0x15, 0x64, 0x36, 0xd7, 0x21, 0xb7, 0x3a, - 0xd7, 0x21, 0xcc, 0xfd, 0x6d, 0x04, 0xf6, 0x25, 0x6c, 0x78, 0xd8, 0xac, 0xc0, 0x08, 0x7f, 0x43, - 0x8e, 0xd0, 0xdf, 0x10, 0x49, 0xf8, 0x1b, 0x52, 0x90, 0x09, 0x53, 0xec, 0x79, 0xd9, 0xf3, 0xac, - 0x8e, 0x8d, 0xcd, 0x4a, 0x89, 0xea, 0x3f, 0x9e, 0xa6, 0x5f, 0xf0, 0xd4, 0x8f, 0x0f, 0x07, 0xd5, - 0x8a, 0x2a, 0xa7, 0xd8, 0x88, 0xe9, 0x44, 0xef, 0xc3, 0x24, 0x43, 0x1a, 0x81, 0x6d, 0x5b, 0x76, - 0xa7, 0x32, 0x41, 0x8d, 0x3c, 0x96, 0x66, 0x84, 0xb3, 0xd4, 0x1f, 0x1b, 0x0e, 0xaa, 0x47, 0x15, - 0x29, 0xc5, 0x84, 0xaa, 0x90, 0x44, 0x0c, 0x06, 0x44, 0x0b, 0x3b, 0x99, 0x16, 0x31, 0x56, 0x55, - 0x26, 0x16, 0x31, 0x62, 0x92, 0x6a, 0xc4, 0x88, 0x11, 0xa3, 0xf5, 0xe0, 0x8b, 0x3c, 0x35, 0x7a, - 0x3d, 0xf8, 0x3a, 0x4b, 0xeb, 0x91, 0xb2, 0xd4, 0x8a, 0x36, 0xf4, 0x21, 0x90, 0x17, 0xcf, 0xb9, - 0xa0, 0xdf, 0xb5, 0xda, 0x86, 0x8f, 0xcf, 0x61, 0x1f, 0xb7, 0x49, 0xa4, 0x9e, 0xa6, 0x56, 0xf4, - 0x84, 0x95, 0x04, 0x67, 0x5d, 0x1f, 0x0e, 0xaa, 0x0b, 0x69, 0x3a, 0x14, 0xab, 0xa9, 0x56, 0xd0, - 0x0f, 0x34, 0x98, 0xf5, 0x7c, 0xc3, 0x36, 0x8d, 0xae, 0x63, 0xe3, 0x8b, 0x76, 0xc7, 0xc5, 0x9e, - 0x77, 0xd1, 0xde, 0x72, 0x2a, 0x65, 0x6a, 0xff, 0x54, 0x2c, 0xac, 0xa7, 0xb1, 0xd6, 0x4f, 0x0d, - 0x07, 0xd5, 0x6a, 0xaa, 0x16, 0xc5, 0x83, 0x74, 0x43, 0x68, 0x0f, 
0x8e, 0x88, 0xac, 0x62, 0xd3, - 0xb7, 0xba, 0x96, 0x67, 0xf8, 0x96, 0x63, 0x57, 0x0e, 0x53, 0xfb, 0x27, 0xe3, 0xd1, 0x31, 0xc1, - 0x58, 0x3f, 0x39, 0x1c, 0x54, 0x4f, 0xa4, 0x68, 0x50, 0x6c, 0xa7, 0x99, 0x88, 0xb6, 0xd0, 0xba, - 0x8b, 0x09, 0x23, 0x36, 0x2b, 0x47, 0x46, 0x6f, 0xa1, 0x90, 0x49, 0xde, 0x42, 0x21, 0x98, 0xb6, - 0x85, 0x42, 0x22, 0xb1, 0xd4, 0x37, 0x5c, 0xdf, 0x22, 0x66, 0xd7, 0x0c, 0x77, 0x07, 0xbb, 0x95, - 0x99, 0x34, 0x4b, 0xeb, 0x2a, 0x13, 0xb3, 0x14, 0x93, 0x54, 0x2d, 0xc5, 0x88, 0xe8, 0xa6, 0x06, - 0xaa, 0x6b, 0x96, 0x63, 0x37, 0x48, 0xda, 0xe0, 0x91, 0xe1, 0xcd, 0x52, 0xa3, 0x4f, 0xdc, 0x62, - 0x78, 0x32, 0x7b, 0xfd, 0x89, 0xe1, 0xa0, 0x7a, 0x6a, 0xa4, 0x36, 0xc5, 0x91, 0xd1, 0x46, 0xd1, - 0x35, 0x28, 0x11, 0x22, 0xa6, 0x09, 0x98, 0x59, 0x99, 0xa3, 0x3e, 0x1c, 0x4b, 0xfa, 0xc0, 0x19, - 0x68, 0x06, 0x32, 0x2b, 0x49, 0x28, 0x76, 0x64, 0x55, 0xe8, 0x63, 0x0d, 0xc8, 0x31, 0x4b, 0x1b, - 0xe9, 0x51, 0x6a, 0xe5, 0xf1, 0x84, 0x95, 0xb4, 0x61, 0x3e, 0x3e, 0x1c, 0x54, 0x17, 0xd3, 0xf5, - 0x28, 0xb6, 0x47, 0xd8, 0xaa, 0xe7, 0x61, 0x9c, 0x1a, 0xd0, 0x87, 0x39, 0x38, 0x92, 0xb2, 0x45, - 0xd1, 0xeb, 0x90, 0x73, 0x03, 0x9b, 0xe4, 0x8d, 0x2c, 0x59, 0x42, 0xaa, 0x5b, 0x9b, 0x81, 0x65, - 0xb2, 0xa4, 0xd5, 0x0d, 0x6c, 0x25, 0x95, 0x1c, 0xa7, 0x00, 0x91, 0x27, 0x49, 0xab, 0x65, 0xf2, - 0xa4, 0x68, 0xa4, 0xfc, 0x75, 0xa7, 0xa5, 0xca, 0x53, 0x00, 0x61, 0x98, 0x14, 0xfb, 0xbf, 0x69, - 0x91, 0xc3, 0x9d, 0x4d, 0x9b, 0x9d, 0xb7, 0x82, 0x16, 0x76, 0x6d, 0xec, 0x63, 0x4f, 0x8c, 0x81, - 0x9e, 0x6e, 0x1a, 0xcc, 0x5c, 0x09, 0x91, 0xf4, 0x4f, 0xc8, 0x38, 0xfa, 0x4c, 0x83, 0x4a, 0xcf, - 0xd8, 0x6b, 0x0a, 0xd0, 0x6b, 0x6e, 0x39, 0x6e, 0xb3, 0x8f, 0x5d, 0xcb, 0x31, 0x69, 0x0e, 0x5c, - 0x3a, 0xf3, 0x9d, 0xdb, 0x9e, 0xe7, 0xda, 0x9a, 0xb1, 0x27, 0x60, 0xef, 0x4d, 0xc7, 0x5d, 0xa7, - 0xe2, 0x2b, 0xb6, 0xef, 0xee, 0xd7, 0x4f, 0x7c, 0x39, 0xa8, 0x1e, 0x22, 0xbb, 0xa3, 0x97, 0xc6, - 0xd3, 0x48, 0x87, 0xd1, 0x4f, 0x34, 0x98, 0xf3, 0x1d, 0xdf, 0xe8, 0x36, 0xdb, 0x41, 0x2f, 0xe8, - 0x1a, 0xbe, 0xb5, 0x8b, 0x9b, 0x81, 0x67, 0x74, 0x30, 0x4f, 0xb5, 0x5f, 0xbb, 0xbd, 0x53, 0x57, - 0x89, 0xfc, 0xd9, 0x50, 0x7c, 0x93, 0x48, 0x33, 0x9f, 0x8e, 0x73, 0x9f, 0x66, 0xfc, 0x14, 0x96, - 0x46, 0x2a, 0x3a, 0xff, 0x4b, 0x0d, 0xe6, 0x47, 0x0f, 0x13, 0x9d, 0x82, 0xec, 0x0e, 0xde, 0xe7, - 0xc5, 0xcc, 0xe1, 0xe1, 0xa0, 0x3a, 0xb9, 0x83, 0xf7, 0xa5, 0x59, 0x27, 0x54, 0xf4, 0x3d, 0x18, - 0xdf, 0x35, 0xba, 0x01, 0xe6, 0x5b, 0xa2, 0x56, 0x63, 0x65, 0x5b, 0x4d, 0x2e, 0xdb, 0x6a, 0xfd, - 0x9d, 0x0e, 0x01, 0x6a, 0x62, 0x45, 0x6a, 0x6f, 0x07, 0x86, 0xed, 0x5b, 0xfe, 0x3e, 0xdb, 0x2e, - 0x54, 0x81, 0xbc, 0x5d, 0x28, 0xf0, 0x6a, 0xe6, 0x65, 0x6d, 0xfe, 0x73, 0x0d, 0x8e, 0x8d, 0x1c, - 0xf4, 0xb7, 0xc1, 0x43, 0xbd, 0x09, 0x63, 0x64, 0xe3, 0x93, 0x32, 0x6b, 0xdb, 0xea, 0x6c, 0xbf, - 0xf4, 0x02, 0x75, 0x27, 0xc7, 0xaa, 0x22, 0x86, 0xc8, 0x55, 0x11, 0x43, 0x48, 0xa9, 0xd8, 0x75, - 0x6e, 0xbc, 0xf4, 0x02, 0x75, 0x2a, 0xc7, 0x8c, 0x50, 0x40, 0x36, 0x42, 0x01, 0xfd, 0xdf, 0x39, - 0x28, 0x86, 0x75, 0x8c, 0x74, 0x06, 0xb5, 0xbb, 0x3a, 0x83, 0x17, 0xa0, 0x6c, 0x62, 0x93, 0xbf, - 0x80, 0x2d, 0xc7, 0x16, 0xa7, 0xb9, 0xc8, 0x82, 0xbc, 0x42, 0x53, 0xe4, 0xa7, 0x63, 0x24, 0x74, - 0x06, 0x0a, 0x3c, 0xdf, 0xdf, 0xa7, 0x07, 0x79, 0xb2, 0x3e, 0x37, 0x1c, 0x54, 0x91, 0xc0, 0x24, - 0xd1, 0x90, 0x0f, 0x35, 0x00, 0x58, 0x11, 0xbd, 0x86, 0x7d, 0x83, 0x57, 0x1e, 0x15, 0x75, 0x04, - 0x57, 0x42, 0x3a, 0x2b, 0x87, 0x23, 0x7e, 0xb9, 0x1c, 0x8e, 0x50, 0xf4, 0x1e, 0x40, 0xcf, 0xb0, - 0x6c, 0x26, 0xc7, 0xcb, 0x0c, 0x7d, 0x54, 0x48, 0x59, 0x0b, 0x39, 0x99, 0xf6, 0x48, 0x52, 
0xd6, - 0x1e, 0xa1, 0xa4, 0x68, 0xe5, 0x65, 0x7f, 0x25, 0x47, 0x4f, 0xe9, 0xc2, 0x28, 0xd5, 0x5c, 0xed, - 0x2c, 0x29, 0x5c, 0xb9, 0x88, 0xa4, 0x53, 0x68, 0x21, 0xd3, 0xd6, 0xb5, 0xb6, 0xb0, 0x6f, 0xf5, - 0x30, 0x2d, 0x30, 0xf8, 0xb4, 0x09, 0x4c, 0x9e, 0x36, 0x81, 0xa1, 0x97, 0x01, 0x0c, 0x7f, 0xcd, - 0xf1, 0xfc, 0x2b, 0x76, 0x1b, 0xd3, 0xc2, 0xa1, 0xc0, 0xdc, 0x8f, 0x50, 0xd9, 0xfd, 0x08, 0x45, - 0xaf, 0x41, 0xa9, 0xcf, 0x5f, 0x15, 0xad, 0x2e, 0xa6, 0x85, 0x41, 0x81, 0xbd, 0xd9, 0x24, 0x58, - 0x92, 0x95, 0xb9, 0xd1, 0x79, 0x98, 0x6e, 0x3b, 0x76, 0x3b, 0x70, 0x5d, 0x6c, 0xb7, 0xf7, 0x37, - 0x8c, 0x2d, 0x4c, 0x8b, 0x80, 0x02, 0xdb, 0x2a, 0x31, 0x92, 0xbc, 0x55, 0x62, 0x24, 0xf4, 0x22, - 0x14, 0xc3, 0x26, 0x0a, 0xcd, 0xf3, 0x8b, 0xbc, 0x1e, 0x17, 0xa0, 0x24, 0x1c, 0x71, 0x12, 0xe7, - 0x2d, 0x2f, 0x4c, 0x16, 0x69, 0xee, 0xce, 0x9d, 0x97, 0x60, 0xd9, 0x79, 0x09, 0x46, 0x17, 0xe1, - 0x30, 0x7d, 0x3d, 0x37, 0x7d, 0xbf, 0xdb, 0xf4, 0x70, 0xdb, 0xb1, 0x4d, 0x8f, 0xa6, 0xe6, 0x59, - 0xe6, 0x3e, 0x25, 0x5e, 0xf5, 0xbb, 0x1b, 0x8c, 0x24, 0xbb, 0x1f, 0x23, 0xe9, 0xbf, 0xd7, 0x60, - 0x26, 0x6d, 0x0b, 0xc5, 0xb6, 0xb3, 0x76, 0x5f, 0xb6, 0xf3, 0x3b, 0x50, 0xe8, 0x3b, 0x66, 0xd3, - 0xeb, 0xe3, 0x36, 0x8f, 0x58, 0xb1, 0xcd, 0xbc, 0xee, 0x98, 0x1b, 0x7d, 0xdc, 0xfe, 0xae, 0xe5, - 0x6f, 0x2f, 0xef, 0x3a, 0x96, 0x79, 0xc9, 0xf2, 0xf8, 0xae, 0xeb, 0x33, 0x8a, 0x92, 0x2c, 0xe4, - 0x39, 0x58, 0x2f, 0x40, 0x8e, 0x59, 0xd1, 0xff, 0x90, 0x85, 0x72, 0x7c, 0xdb, 0xfe, 0x2f, 0x0d, - 0x05, 0x5d, 0x83, 0xbc, 0xc5, 0x32, 0x77, 0x9e, 0x41, 0xfc, 0x9f, 0x14, 0xd3, 0x6b, 0x51, 0xdf, - 0xb1, 0xb6, 0xfb, 0x5c, 0x8d, 0xa7, 0xf8, 0x74, 0x0a, 0xa8, 0x66, 0x2e, 0xa9, 0x6a, 0xe6, 0x20, - 0x6a, 0x40, 0xde, 0xc3, 0xee, 0xae, 0xd5, 0xc6, 0x3c, 0x38, 0x55, 0x65, 0xcd, 0x6d, 0xc7, 0xc5, - 0x44, 0xe7, 0x06, 0x63, 0x89, 0x74, 0x72, 0x19, 0x55, 0x27, 0x07, 0xd1, 0x3b, 0x50, 0x6c, 0x3b, - 0xf6, 0x96, 0xd5, 0x59, 0x33, 0xfa, 0x3c, 0x3c, 0x9d, 0x48, 0xd3, 0x7a, 0x56, 0x30, 0xf1, 0x5e, - 0x88, 0x78, 0x8c, 0xf5, 0x42, 0x42, 0xae, 0x68, 0x41, 0xff, 0x36, 0x06, 0x10, 0x2d, 0x0e, 0x7a, - 0x05, 0x4a, 0x78, 0x0f, 0xb7, 0x03, 0xdf, 0x71, 0xc5, 0x7b, 0x82, 0xb7, 0x16, 0x05, 0xac, 0x04, - 0x76, 0x88, 0x50, 0x72, 0x50, 0x6d, 0xa3, 0x87, 0xbd, 0xbe, 0xd1, 0x16, 0x3d, 0x49, 0xea, 0x4c, - 0x08, 0xca, 0x07, 0x35, 0x04, 0xd1, 0xff, 0xc3, 0x18, 0xed, 0x62, 0xb2, 0x76, 0x24, 0x1a, 0x0e, - 0xaa, 0x53, 0xb6, 0xda, 0xbf, 0xa4, 0x74, 0xf4, 0x06, 0x4c, 0xee, 0x84, 0x1b, 0x8f, 0xf8, 0x36, - 0x46, 0x05, 0x68, 0x6a, 0x17, 0x11, 0x14, 0xef, 0x26, 0x64, 0x1c, 0x6d, 0x41, 0xc9, 0xb0, 0x6d, - 0xc7, 0xa7, 0xef, 0x20, 0xd1, 0xa2, 0x7c, 0x72, 0xd4, 0x36, 0xad, 0x2d, 0x47, 0xbc, 0x2c, 0x4b, - 0xa2, 0xc1, 0x43, 0xd2, 0x20, 0x07, 0x0f, 0x09, 0x46, 0x0d, 0xc8, 0x75, 0x8d, 0x16, 0xee, 0x8a, - 0xa0, 0xff, 0xf8, 0x48, 0x13, 0x97, 0x28, 0x1b, 0xd3, 0x4e, 0x5f, 0xf9, 0x4c, 0x4e, 0x7e, 0xe5, - 0x33, 0x64, 0x7e, 0x0b, 0xca, 0x71, 0x7f, 0x0e, 0x96, 0xc0, 0x3c, 0x29, 0x27, 0x30, 0xc5, 0xdb, - 0xa6, 0x4c, 0x06, 0x94, 0x24, 0xa7, 0x1e, 0x84, 0x09, 0xfd, 0x57, 0x1a, 0xcc, 0xa4, 0x9d, 0x5d, - 0xb4, 0x26, 0x9d, 0x78, 0x8d, 0xb7, 0x5a, 0x52, 0xb6, 0x3a, 0x97, 0x1d, 0x71, 0xd4, 0xa3, 0x83, - 0x5e, 0x87, 0x29, 0xdb, 0x31, 0x71, 0xd3, 0x20, 0x06, 0xba, 0x96, 0xe7, 0x57, 0x32, 0xb4, 0x85, - 0x4d, 0x5b, 0x34, 0x84, 0xb2, 0x2c, 0x08, 0x92, 0xf4, 0xa4, 0x42, 0xd0, 0x7f, 0xa4, 0xc1, 0x74, - 0xac, 0x83, 0x7a, 0xcf, 0x49, 0x94, 0x9c, 0xfa, 0x64, 0x0e, 0x96, 0xfa, 0xe8, 0x3f, 0xcd, 0x40, - 0x49, 0x2a, 0x2f, 0xef, 0xd9, 0x87, 0xeb, 0x30, 0xcd, 0xdf, 0x94, 0x96, 0xdd, 0x61, 0xe5, 0x54, - 0x86, 0xf7, 0x4a, 
0x12, 0x1f, 0x2c, 0x56, 0x9d, 0xd6, 0x46, 0xc8, 0x4b, 0xab, 0x29, 0xda, 0x48, - 0xf3, 0x14, 0x4c, 0x32, 0x31, 0xa5, 0x52, 0xd0, 0x35, 0x98, 0x0b, 0xfa, 0xa6, 0xe1, 0xe3, 0xa6, - 0xc7, 0x5b, 0xff, 0x4d, 0x3b, 0xe8, 0xb5, 0xb0, 0x4b, 0x4f, 0xfc, 0x38, 0x6b, 0xfd, 0x30, 0x0e, - 0xf1, 0x6d, 0xe0, 0x32, 0xa5, 0x4b, 0x3a, 0x67, 0xd2, 0xe8, 0xfa, 0x05, 0x40, 0xc9, 0xf6, 0xb6, - 0x32, 0xbf, 0xda, 0x01, 0xe7, 0xf7, 0x13, 0x0d, 0xca, 0xf1, 0xae, 0xf5, 0x43, 0x59, 0xe8, 0x7d, - 0x28, 0x86, 0x1d, 0xe8, 0x7b, 0x76, 0xe0, 0x69, 0xc8, 0xb9, 0xd8, 0xf0, 0x1c, 0x9b, 0x9f, 0x4c, - 0x1a, 0x62, 0x18, 0x22, 0x87, 0x18, 0x86, 0xe8, 0x57, 0x61, 0x82, 0xcd, 0xe0, 0x9b, 0x56, 0xd7, - 0xc7, 0x2e, 0x3a, 0x07, 0x39, 0xcf, 0x37, 0x7c, 0xec, 0x55, 0xb4, 0xc5, 0xec, 0xe9, 0xa9, 0x33, - 0x73, 0xc9, 0x66, 0x33, 0x21, 0x33, 0xad, 0x8c, 0x53, 0xd6, 0xca, 0x10, 0xfd, 0x87, 0x1a, 0x4c, - 0xc8, 0x3d, 0xf5, 0xfb, 0xa3, 0xf6, 0x0e, 0x87, 0xf6, 0xa1, 0xf0, 0xa1, 0x7b, 0x7f, 0x56, 0xf6, - 0xce, 0xac, 0xff, 0x46, 0x63, 0x33, 0x1b, 0x36, 0x63, 0xef, 0xd5, 0x7c, 0x27, 0x6a, 0x85, 0x90, - 0x13, 0xe6, 0xd1, 0xc0, 0x76, 0xd0, 0x56, 0x08, 0x0d, 0x7f, 0x8a, 0xb8, 0x1c, 0xfe, 0x14, 0x82, - 0xfe, 0xd9, 0x38, 0xf5, 0x3c, 0x6a, 0xbc, 0x3f, 0xec, 0x26, 0x50, 0x2c, 0x3b, 0xc9, 0xde, 0x41, - 0x76, 0xf2, 0x0c, 0xe4, 0xe9, 0xeb, 0x20, 0x4c, 0x1c, 0xe8, 0xa2, 0x11, 0x48, 0xfd, 0xf0, 0xc9, - 0x90, 0x5b, 0x44, 0xad, 0xf1, 0x7b, 0x8b, 0x5a, 0xa8, 0x09, 0xc7, 0xb6, 0x0d, 0xaf, 0x29, 0xe2, - 0xac, 0xd9, 0x34, 0xfc, 0x66, 0x18, 0x27, 0x72, 0xb4, 0x4c, 0xa1, 0xcd, 0xbc, 0x6d, 0xc3, 0xdb, - 0x10, 0x3c, 0xcb, 0xfe, 0x7a, 0x32, 0x6a, 0xcc, 0xa5, 0x73, 0xa0, 0x4d, 0x98, 0x4d, 0x57, 0x9e, - 0xa7, 0x9e, 0xd3, 0x5e, 0xb3, 0x77, 0x4b, 0xcd, 0x47, 0x52, 0xc8, 0xe8, 0x23, 0x0d, 0x2a, 0xe4, - 0xfd, 0xec, 0xe2, 0x0f, 0x02, 0xcb, 0xc5, 0x3d, 0xb2, 0x62, 0x4d, 0x67, 0x17, 0xbb, 0x5d, 0x63, - 0x9f, 0x7f, 0x34, 0x3a, 0x99, 0x7c, 0x7b, 0xac, 0x3b, 0x66, 0x43, 0x12, 0x60, 0x43, 0xeb, 0xab, - 0xe0, 0x15, 0xa6, 0x44, 0x1e, 0x5a, 0x3a, 0xc7, 0xea, 0x58, 0xa1, 0x50, 0x2e, 0xea, 0xff, 0xd4, - 0x60, 0x4a, 0xfd, 0xb6, 0xf3, 0xd0, 0x37, 0x66, 0xe2, 0x48, 0x66, 0x1f, 0xd0, 0x91, 0xfc, 0x87, - 0x06, 0x93, 0xca, 0x27, 0xa7, 0x47, 0x67, 0xe8, 0x3f, 0xcf, 0xc0, 0x5c, 0xba, 0x9a, 0x07, 0x52, - 0x80, 0x5e, 0x00, 0x92, 0x4a, 0x5e, 0x8c, 0x72, 0xa3, 0xd9, 0x44, 0xfd, 0x49, 0x87, 0x20, 0xf2, - 0xd0, 0xc4, 0xb7, 0x22, 0x21, 0x8e, 0xae, 0x41, 0xc9, 0x92, 0xbe, 0x4a, 0x65, 0xd3, 0x3e, 0x1e, - 0xc8, 0xdf, 0xa2, 0x58, 0x97, 0x62, 0xc4, 0x17, 0x28, 0x59, 0x55, 0x3d, 0x07, 0x63, 0x24, 0x79, - 0xd3, 0x77, 0x21, 0xcf, 0xdd, 0x41, 0xcf, 0x43, 0x91, 0xc6, 0x39, 0x5a, 0x53, 0xb1, 0xc4, 0x9d, - 0xa6, 0x1d, 0x04, 0x8c, 0xdd, 0x0b, 0x29, 0x08, 0x0c, 0xbd, 0x04, 0x40, 0x8e, 0x36, 0x8f, 0x70, - 0x19, 0x1a, 0x27, 0x68, 0xed, 0xd6, 0x77, 0xcc, 0x44, 0x58, 0x2b, 0x86, 0xa0, 0xfe, 0xeb, 0x0c, - 0x94, 0xe4, 0xef, 0x60, 0x77, 0x65, 0xfc, 0x43, 0x10, 0x75, 0x75, 0xd3, 0x30, 0x4d, 0xf2, 0x17, - 0x8b, 0x57, 0xda, 0xd2, 0xc8, 0x49, 0x12, 0xff, 0x2f, 0x0b, 0x09, 0x56, 0x45, 0xd1, 0x9b, 0x06, - 0x56, 0x8c, 0x24, 0x59, 0x2d, 0xc7, 0x69, 0xf3, 0x3b, 0x30, 0x9b, 0xaa, 0x4a, 0xae, 0x7d, 0xc6, - 0xef, 0x57, 0xed, 0xf3, 0xdb, 0x71, 0x98, 0x4d, 0xfd, 0xfe, 0xf8, 0xd0, 0x4f, 0xb1, 0x7a, 0x82, - 0xb2, 0xf7, 0xe5, 0x04, 0x7d, 0xa2, 0xa5, 0xad, 0x2c, 0xfb, 0x88, 0xf2, 0xca, 0x01, 0x3e, 0xca, - 0xde, 0xaf, 0x35, 0x56, 0xb7, 0xe5, 0xf8, 0x5d, 0x9d, 0x89, 0xdc, 0x41, 0xcf, 0x04, 0x7a, 0x96, - 0x95, 0xb1, 0xd4, 0x56, 0x9e, 0xda, 0x12, 0x11, 0x22, 0x66, 0x2a, 0xcf, 0x21, 0xf4, 0x06, 0x4c, - 0x0a, 0x09, 0xd6, 0x3c, 0x29, 0x44, 0x9d, 
0x0d, 0xce, 0x13, 0xef, 0x9f, 0x4c, 0xc8, 0xf8, 0x7f, - 0x77, 0x0f, 0xff, 0x4b, 0x83, 0xe9, 0xd8, 0x85, 0x84, 0x47, 0xe7, 0x1d, 0xf4, 0xa9, 0x06, 0xc5, - 0xf0, 0x2e, 0xcc, 0x3d, 0x27, 0xf2, 0xcb, 0x90, 0xc3, 0xec, 0x3e, 0x06, 0x0b, 0x77, 0x47, 0x62, - 0xf7, 0xe5, 0x08, 0x8d, 0xdf, 0x90, 0x8b, 0x5d, 0xc1, 0x68, 0x70, 0x41, 0xfd, 0x8f, 0x9a, 0x48, - 0xd1, 0x23, 0x9f, 0x1e, 0xea, 0x52, 0x44, 0x63, 0xca, 0xde, 0xed, 0x98, 0x7e, 0x57, 0x84, 0x71, - 0xca, 0x47, 0x4a, 0x68, 0x1f, 0xbb, 0x3d, 0xcb, 0x36, 0xba, 0x74, 0x38, 0x05, 0x76, 0x6e, 0x05, - 0x26, 0x9f, 0x5b, 0x81, 0xa1, 0x6d, 0x98, 0x8e, 0xda, 0x7e, 0x54, 0x4d, 0xfa, 0x35, 0xbc, 0xb7, - 0x54, 0x26, 0xd6, 0xd8, 0x8f, 0x49, 0xaa, 0xf7, 0x14, 0x62, 0x44, 0x64, 0xc2, 0x54, 0xdb, 0xb1, - 0x7d, 0xc3, 0xb2, 0xb1, 0xcb, 0x0c, 0x65, 0xd3, 0xae, 0x21, 0x9d, 0x55, 0x78, 0x58, 0xf7, 0x44, - 0x95, 0x53, 0xaf, 0x21, 0xa9, 0x34, 0xf4, 0x3e, 0x4c, 0x8a, 0x32, 0x86, 0x19, 0x19, 0x4b, 0xbb, - 0x86, 0xb4, 0x22, 0xb3, 0xb0, 0x2d, 0xad, 0x48, 0xa9, 0xd7, 0x90, 0x14, 0x12, 0xea, 0x42, 0xb9, - 0xef, 0x98, 0x9b, 0x36, 0x4f, 0xde, 0x8d, 0x56, 0x17, 0xf3, 0x5e, 0xf3, 0x42, 0x22, 0xe5, 0x51, - 0xb8, 0x58, 0x28, 0x8e, 0xcb, 0xaa, 0x17, 0xfb, 0xe2, 0x54, 0xf4, 0x1e, 0x4c, 0x74, 0x49, 0x35, - 0xb9, 0xb2, 0xd7, 0xb7, 0x5c, 0x6c, 0xa6, 0x5f, 0xc3, 0xbb, 0x24, 0x71, 0xb0, 0x40, 0x28, 0xcb, - 0xa8, 0x57, 0x91, 0x64, 0x0a, 0x59, 0xfd, 0x9e, 0xb1, 0xd7, 0x08, 0x6c, 0x6f, 0x65, 0x8f, 0x5f, - 0xa9, 0xca, 0xa7, 0xad, 0xfe, 0x9a, 0xca, 0xc4, 0x56, 0x3f, 0x26, 0xa9, 0xae, 0x7e, 0x8c, 0x88, - 0x2e, 0xd1, 0x38, 0xcf, 0x96, 0x84, 0x5d, 0xc7, 0x9b, 0x4b, 0xcc, 0x16, 0x5b, 0x0d, 0xd6, 0xf6, - 0xe1, 0x4f, 0x8a, 0xd2, 0x50, 0x03, 0x5f, 0x03, 0x3a, 0xec, 0x06, 0xf6, 0x03, 0xd7, 0xc6, 0x26, - 0x2f, 0xaa, 0x92, 0x6b, 0xa0, 0x70, 0x85, 0x6b, 0xa0, 0xa0, 0x89, 0x35, 0x50, 0xa8, 0x64, 0x4f, - 0xf5, 0x1d, 0xf3, 0x2a, 0x3b, 0x32, 0x7e, 0x78, 0x3f, 0xef, 0xb1, 0x84, 0xa9, 0x88, 0x85, 0xed, - 0x29, 0x45, 0x4a, 0xdd, 0x53, 0x0a, 0x89, 0x5f, 0x09, 0x93, 0x2f, 0x10, 0xb1, 0x99, 0x2a, 0x8d, - 0xb8, 0x12, 0x96, 0xe0, 0x0c, 0xaf, 0x84, 0x25, 0x28, 0x89, 0x2b, 0x61, 0x09, 0x0e, 0x62, 0xbd, - 0x63, 0xd8, 0x9d, 0x55, 0xa7, 0xa5, 0xee, 0xea, 0x89, 0x34, 0xeb, 0xe7, 0x53, 0x38, 0x99, 0xf5, - 0x34, 0x1d, 0xaa, 0xf5, 0x34, 0x8e, 0x7a, 0x41, 0xb4, 0x87, 0xf4, 0xcf, 0x35, 0x98, 0x8e, 0xc5, - 0x19, 0xf4, 0x3a, 0x84, 0x37, 0x4e, 0xae, 0xee, 0xf7, 0x45, 0x9a, 0xac, 0xdc, 0x50, 0x21, 0x78, - 0xda, 0x0d, 0x15, 0x82, 0xa3, 0x4b, 0x00, 0xe1, 0x3b, 0xe9, 0x56, 0x41, 0x9a, 0xe6, 0x68, 0x11, - 0xa7, 0x9c, 0xa3, 0x45, 0xa8, 0xfe, 0x55, 0x16, 0x0a, 0x62, 0xa3, 0x3e, 0x90, 0x32, 0x6a, 0x09, - 0xf2, 0x3d, 0xec, 0xd1, 0x9b, 0x2a, 0x99, 0x28, 0x1b, 0xe2, 0x90, 0x9c, 0x0d, 0x71, 0x48, 0x4d, - 0xd6, 0xb2, 0x77, 0x95, 0xac, 0x8d, 0x1d, 0x38, 0x59, 0xc3, 0xf4, 0x2b, 0xb5, 0x14, 0x6e, 0xc5, - 0x77, 0xa1, 0x5b, 0xc7, 0x70, 0xf1, 0x0d, 0x5b, 0x16, 0x8c, 0x7d, 0xc3, 0x96, 0x49, 0x68, 0x07, - 0x0e, 0x4b, 0xdf, 0xae, 0x78, 0xef, 0x90, 0x04, 0xbe, 0xa9, 0xd1, 0x57, 0x02, 0x1a, 0x94, 0x8b, - 0x1d, 0xef, 0x9d, 0x18, 0x2a, 0x67, 0xbb, 0x71, 0x9a, 0xfe, 0xd7, 0x0c, 0x4c, 0xa9, 0xfe, 0x3e, - 0x90, 0x85, 0x7d, 0x1e, 0x8a, 0x78, 0xcf, 0xf2, 0x9b, 0x6d, 0xc7, 0xc4, 0xbc, 0x64, 0xa4, 0xeb, - 0x44, 0xc0, 0xb3, 0x8e, 0xa9, 0xac, 0x93, 0xc0, 0xe4, 0xdd, 0x90, 0x3d, 0xd0, 0x6e, 0x88, 0x5a, - 0xad, 0x63, 0xb7, 0x6f, 0xb5, 0xa6, 0xcf, 0x73, 0xf1, 0x01, 0xcd, 0xf3, 0xcd, 0x0c, 0x94, 0xe3, - 0xd1, 0xf8, 0xdb, 0x71, 0x84, 0xd4, 0xd3, 0x90, 0x3d, 0xf0, 0x69, 0x78, 0x03, 0x26, 0x49, 0xee, - 0x68, 0xf8, 0x3e, 0xbf, 0x4a, 0x3a, 0x46, 0x73, 0x2e, 0x16, 0x9b, 
0x02, 0x7b, 0x59, 0xe0, 0x4a, - 0x6c, 0x92, 0x70, 0xfd, 0xa3, 0x0c, 0x4c, 0x2a, 0x6f, 0x8d, 0x47, 0x2f, 0xa4, 0xe8, 0xd3, 0x30, - 0xa9, 0x24, 0x63, 0xfa, 0xc7, 0x6c, 0x9f, 0xa8, 0x59, 0xd0, 0xa3, 0x37, 0x2f, 0x53, 0x30, 0x21, - 0x67, 0x75, 0x7a, 0x1d, 0xa6, 0x63, 0x49, 0x98, 0x3c, 0x00, 0xed, 0x20, 0x03, 0xd0, 0xe7, 0x60, - 0x26, 0x2d, 0x77, 0xd0, 0xcf, 0xc3, 0x4c, 0xda, 0x5b, 0xfd, 0xce, 0x0d, 0x7c, 0xa1, 0x51, 0x0b, - 0xc9, 0x4b, 0xe7, 0x17, 0x00, 0x6c, 0x7c, 0xa3, 0x79, 0xdb, 0xf2, 0x8f, 0xcd, 0x27, 0xbe, 0xb1, - 0x1a, 0xab, 0x96, 0x0a, 0x02, 0x23, 0x9a, 0x9c, 0xae, 0xd9, 0xbc, 0x6d, 0xd1, 0x45, 0x35, 0x39, - 0x5d, 0x33, 0xa1, 0x49, 0x60, 0xfa, 0x8f, 0xb3, 0xa2, 0x32, 0x8f, 0x6e, 0x6d, 0xbf, 0x0b, 0xe5, - 0xbe, 0x78, 0xb8, 0xbd, 0xb7, 0xb4, 0x36, 0x09, 0xf9, 0xe3, 0x96, 0xa6, 0x54, 0x8a, 0xaa, 0x9b, - 0x17, 0x9d, 0x99, 0x03, 0xea, 0x6e, 0xc4, 0xaa, 0xcf, 0x29, 0x95, 0x82, 0xbe, 0x0f, 0x87, 0xc5, - 0x6d, 0xb2, 0x5d, 0x2c, 0x1c, 0xcf, 0x8e, 0x54, 0xce, 0x2e, 0x99, 0x87, 0x02, 0x71, 0xcf, 0xa7, - 0x63, 0xa4, 0x98, 0x7a, 0xee, 0xfb, 0xd8, 0x41, 0xd5, 0xc7, 0x9d, 0x9f, 0x8e, 0x91, 0xf4, 0x4f, - 0x35, 0x98, 0x8e, 0xdd, 0x83, 0x47, 0xe7, 0xa0, 0x40, 0x7f, 0x26, 0x77, 0xeb, 0x15, 0xa0, 0x1b, - 0x92, 0xf2, 0x29, 0x16, 0xf2, 0x1c, 0x42, 0x2f, 0x42, 0x31, 0xbc, 0x2e, 0xcf, 0xbf, 0x2a, 0xb3, - 0xc3, 0x27, 0x40, 0xe5, 0xf0, 0x09, 0x50, 0xff, 0x85, 0x06, 0xc7, 0x46, 0xde, 0x91, 0x7f, 0xd8, - 0x3d, 0x03, 0xfd, 0x1a, 0xcc, 0xa5, 0x5f, 0x6b, 0xbf, 0xd7, 0x0e, 0xcb, 0x53, 0xcf, 0x42, 0x41, - 0x7c, 0x51, 0x46, 0x00, 0xb9, 0xb7, 0x37, 0x57, 0x36, 0x57, 0xce, 0x95, 0x0f, 0xa1, 0x12, 0xe4, - 0xd7, 0x57, 0x2e, 0x9f, 0xbb, 0x78, 0xf9, 0x7c, 0x59, 0x23, 0x0f, 0x8d, 0xcd, 0xcb, 0x97, 0xc9, - 0x43, 0xe6, 0xa9, 0x4b, 0xf2, 0xfd, 0x36, 0xf6, 0xa6, 0x47, 0x13, 0x50, 0x58, 0xee, 0xf7, 0x69, - 0x68, 0x61, 0xb2, 0x2b, 0xbb, 0x16, 0x89, 0x02, 0x65, 0x0d, 0xe5, 0x21, 0x7b, 0xe5, 0xca, 0x5a, - 0x39, 0x83, 0x66, 0xa0, 0x7c, 0x0e, 0x1b, 0x66, 0xd7, 0xb2, 0xb1, 0x88, 0x67, 0xe5, 0x6c, 0xfd, - 0xfa, 0x97, 0x5f, 0x2f, 0x68, 0x5f, 0x7d, 0xbd, 0xa0, 0xfd, 0xe5, 0xeb, 0x05, 0xed, 0xe6, 0x37, - 0x0b, 0x87, 0xbe, 0xfa, 0x66, 0xe1, 0xd0, 0x9f, 0xbe, 0x59, 0x38, 0xf4, 0xee, 0xb3, 0xd2, 0x8f, - 0x4d, 0xd9, 0x98, 0xfa, 0xae, 0x43, 0x42, 0x39, 0x7f, 0x5a, 0x8a, 0xff, 0xbc, 0xf6, 0x8b, 0xcc, - 0x89, 0x65, 0xfa, 0xb8, 0xce, 0xf8, 0x6a, 0x17, 0x9d, 0x1a, 0x03, 0xe8, 0x2f, 0x24, 0xbd, 0x56, - 0x8e, 0xfe, 0x12, 0xf2, 0xf9, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x03, 0x65, 0x0b, 0x79, 0x99, - 0x3b, 0x00, 0x00, +// Indicates that the scheduler is happy with the job +type JobValidated struct { + JobId *Uuid `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + Pools []string `protobuf:"bytes,2,rep,name=pools,proto3" json:"pools,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` } -func (m *EventSequence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *JobValidated) Reset() { *m = JobValidated{} } +func (m *JobValidated) String() string { return proto.CompactTextString(m) } +func (*JobValidated) ProtoMessage() {} +func (*JobValidated) Descriptor() ([]byte, []int) { + return fileDescriptor_6aab92ca59e015f8, []int{44} +} +func (m *JobValidated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobValidated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_JobValidated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *EventSequence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *JobValidated) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobValidated.Merge(m, src) +} +func (m *JobValidated) XXX_Size() int { + return m.Size() +} +func (m *JobValidated) XXX_DiscardUnknown() { + xxx_messageInfo_JobValidated.DiscardUnknown(m) +} + +var xxx_messageInfo_JobValidated proto.InternalMessageInfo + +func (m *JobValidated) GetJobId() *Uuid { + if m != nil { + return m.JobId + } + return nil +} + +func (m *JobValidated) GetPools() []string { + if m != nil { + return m.Pools + } + return nil +} + +func (m *JobValidated) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +// Generated by the scheduler when a job is cancelled, all active job runs are also cancelled +// One such message is generated per job run that was cancelled. +type JobRunCancelled struct { + RunId *Uuid `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"runId,omitempty"` + JobId *Uuid `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + JobIdStr string `protobuf:"bytes,3,opt,name=job_id_str,json=jobIdStr,proto3" json:"jobIdStr,omitempty"` + RunIdStr string `protobuf:"bytes,4,opt,name=run_id_str,json=runIdStr,proto3" json:"runIdStr,omitempty"` +} + +func (m *JobRunCancelled) Reset() { *m = JobRunCancelled{} } +func (m *JobRunCancelled) String() string { return proto.CompactTextString(m) } +func (*JobRunCancelled) ProtoMessage() {} +func (*JobRunCancelled) Descriptor() ([]byte, []int) { + return fileDescriptor_6aab92ca59e015f8, []int{45} +} +func (m *JobRunCancelled) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobRunCancelled) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_JobRunCancelled.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *JobRunCancelled) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobRunCancelled.Merge(m, src) +} +func (m *JobRunCancelled) XXX_Size() int { + return m.Size() +} +func (m *JobRunCancelled) XXX_DiscardUnknown() { + xxx_messageInfo_JobRunCancelled.DiscardUnknown(m) +} + +var xxx_messageInfo_JobRunCancelled proto.InternalMessageInfo + +func (m *JobRunCancelled) GetRunId() *Uuid { + if m != nil { + return m.RunId + } + return nil +} + +func (m *JobRunCancelled) GetJobId() *Uuid { + if m != nil { + return m.JobId + } + return nil +} + +func (m *JobRunCancelled) GetJobIdStr() string { + if m != nil { + return m.JobIdStr + } + return "" +} + +func (m *JobRunCancelled) GetRunIdStr() string { + if m != nil { + return m.RunIdStr + } + return "" +} + +func init() { + proto.RegisterEnum("armadaevents.JobState", JobState_name, JobState_value) + proto.RegisterEnum("armadaevents.KubernetesReason", KubernetesReason_name, KubernetesReason_value) + proto.RegisterType((*EventSequence)(nil), "armadaevents.EventSequence") + proto.RegisterType((*EventSequence_Event)(nil), "armadaevents.EventSequence.Event") + proto.RegisterType((*ResourceUtilisation)(nil), "armadaevents.ResourceUtilisation") + proto.RegisterMapType((map[string]resource.Quantity)(nil), 
"armadaevents.ResourceUtilisation.MaxResourcesForPeriodEntry") + proto.RegisterMapType((map[string]resource.Quantity)(nil), "armadaevents.ResourceUtilisation.TotalCumulativeUsageEntry") + proto.RegisterType((*Uuid)(nil), "armadaevents.Uuid") + proto.RegisterType((*SubmitJob)(nil), "armadaevents.SubmitJob") + proto.RegisterType((*KubernetesMainObject)(nil), "armadaevents.KubernetesMainObject") + proto.RegisterType((*KubernetesObject)(nil), "armadaevents.KubernetesObject") + proto.RegisterType((*ObjectMeta)(nil), "armadaevents.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "armadaevents.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "armadaevents.ObjectMeta.LabelsEntry") + proto.RegisterType((*PodSpecWithAvoidList)(nil), "armadaevents.PodSpecWithAvoidList") + proto.RegisterType((*ReprioritiseJob)(nil), "armadaevents.ReprioritiseJob") + proto.RegisterType((*JobRequeued)(nil), "armadaevents.JobRequeued") + proto.RegisterType((*ReprioritiseJobSet)(nil), "armadaevents.ReprioritiseJobSet") + proto.RegisterType((*ReprioritisedJob)(nil), "armadaevents.ReprioritisedJob") + proto.RegisterType((*CancelJob)(nil), "armadaevents.CancelJob") + proto.RegisterType((*JobSetFilter)(nil), "armadaevents.JobSetFilter") + proto.RegisterType((*CancelJobSet)(nil), "armadaevents.CancelJobSet") + proto.RegisterType((*CancelledJob)(nil), "armadaevents.CancelledJob") + proto.RegisterType((*JobSucceeded)(nil), "armadaevents.JobSucceeded") + proto.RegisterType((*JobRunLeased)(nil), "armadaevents.JobRunLeased") + proto.RegisterType((*JobRunAssigned)(nil), "armadaevents.JobRunAssigned") + proto.RegisterType((*JobRunRunning)(nil), "armadaevents.JobRunRunning") + proto.RegisterType((*KubernetesResourceInfo)(nil), "armadaevents.KubernetesResourceInfo") + proto.RegisterType((*PodInfo)(nil), "armadaevents.PodInfo") + proto.RegisterType((*IngressInfo)(nil), "armadaevents.IngressInfo") + proto.RegisterMapType((map[int32]string)(nil), "armadaevents.IngressInfo.IngressAddressesEntry") + proto.RegisterType((*StandaloneIngressInfo)(nil), "armadaevents.StandaloneIngressInfo") + proto.RegisterMapType((map[int32]string)(nil), "armadaevents.StandaloneIngressInfo.IngressAddressesEntry") + proto.RegisterType((*JobRunSucceeded)(nil), "armadaevents.JobRunSucceeded") + proto.RegisterType((*JobErrors)(nil), "armadaevents.JobErrors") + proto.RegisterType((*JobRunErrors)(nil), "armadaevents.JobRunErrors") + proto.RegisterType((*Error)(nil), "armadaevents.Error") + proto.RegisterType((*KubernetesError)(nil), "armadaevents.KubernetesError") + proto.RegisterType((*PodError)(nil), "armadaevents.PodError") + proto.RegisterType((*ContainerError)(nil), "armadaevents.ContainerError") + proto.RegisterType((*PodLeaseReturned)(nil), "armadaevents.PodLeaseReturned") + proto.RegisterType((*PodTerminated)(nil), "armadaevents.PodTerminated") + proto.RegisterType((*ExecutorError)(nil), "armadaevents.ExecutorError") + proto.RegisterType((*PodUnschedulable)(nil), "armadaevents.PodUnschedulable") + proto.RegisterType((*LeaseExpired)(nil), "armadaevents.LeaseExpired") + proto.RegisterType((*MaxRunsExceeded)(nil), "armadaevents.MaxRunsExceeded") + proto.RegisterType((*JobRunPreemptedError)(nil), "armadaevents.JobRunPreemptedError") + proto.RegisterType((*GangJobUnschedulable)(nil), "armadaevents.GangJobUnschedulable") + proto.RegisterType((*JobRejected)(nil), "armadaevents.JobRejected") + proto.RegisterType((*JobRunPreempted)(nil), "armadaevents.JobRunPreempted") + proto.RegisterType((*PartitionMarker)(nil), 
"armadaevents.PartitionMarker") + proto.RegisterType((*JobRunPreemptionRequested)(nil), "armadaevents.JobRunPreemptionRequested") + proto.RegisterType((*JobPreemptionRequested)(nil), "armadaevents.JobPreemptionRequested") + proto.RegisterType((*JobValidated)(nil), "armadaevents.JobValidated") + proto.RegisterType((*JobRunCancelled)(nil), "armadaevents.JobRunCancelled") +} + +func init() { proto.RegisterFile("pkg/armadaevents/events.proto", fileDescriptor_6aab92ca59e015f8) } + +var fileDescriptor_6aab92ca59e015f8 = []byte{ + // 3831 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3c, 0x4b, 0x6f, 0x1b, 0x47, + 0x9a, 0x6e, 0x92, 0xe2, 0xe3, 0xa3, 0x1e, 0x74, 0xe9, 0x61, 0x5a, 0xb6, 0x45, 0x99, 0xce, 0x6e, + 0xec, 0x20, 0xa1, 0x12, 0xc5, 0x09, 0xf2, 0x58, 0x24, 0x10, 0x6d, 0xc5, 0xb6, 0x62, 0xd9, 0x0a, + 0x65, 0x65, 0xbd, 0x41, 0x16, 0x4c, 0x93, 0x5d, 0xa2, 0xda, 0x22, 0xbb, 0x99, 0x7e, 0x28, 0x12, + 0x90, 0x43, 0xb2, 0x08, 0x76, 0x6f, 0x59, 0x1f, 0xf6, 0xb0, 0xb7, 0xec, 0x1e, 0x77, 0x17, 0xc1, + 0x02, 0x7b, 0xdc, 0xc7, 0x6d, 0x0f, 0x59, 0x60, 0x31, 0xc8, 0x69, 0x30, 0x27, 0xce, 0x4c, 0x32, + 0x33, 0x07, 0x02, 0x33, 0xff, 0x60, 0x80, 0x41, 0xbd, 0xba, 0xab, 0x9a, 0x4d, 0x9b, 0x96, 0xac, + 0x38, 0x41, 0x72, 0xb2, 0xfb, 0x7b, 0x56, 0xd5, 0xf7, 0xd5, 0x57, 0xdf, 0xf7, 0x55, 0x51, 0x70, + 0xae, 0xbb, 0xdb, 0x5a, 0xd2, 0x9d, 0x8e, 0x6e, 0xe8, 0x78, 0x0f, 0x5b, 0x9e, 0xbb, 0xc4, 0xfe, + 0xa9, 0x74, 0x1d, 0xdb, 0xb3, 0xd1, 0xb8, 0x8c, 0x9a, 0x2f, 0xef, 0xbe, 0xe2, 0x56, 0x4c, 0x7b, + 0x49, 0xef, 0x9a, 0x4b, 0x4d, 0xdb, 0xc1, 0x4b, 0x7b, 0x2f, 0x2c, 0xb5, 0xb0, 0x85, 0x1d, 0xdd, + 0xc3, 0x06, 0xe3, 0x98, 0xbf, 0x28, 0xd1, 0x58, 0xd8, 0xfb, 0xc8, 0x76, 0x76, 0x4d, 0xab, 0x15, + 0x47, 0x59, 0x6a, 0xd9, 0x76, 0xab, 0x8d, 0x97, 0xe8, 0x57, 0xc3, 0xdf, 0x5e, 0xf2, 0xcc, 0x0e, + 0x76, 0x3d, 0xbd, 0xd3, 0xe5, 0x04, 0x97, 0x43, 0x51, 0x1d, 0xbd, 0xb9, 0x63, 0x5a, 0xd8, 0x39, + 0x58, 0xa2, 0xe3, 0xed, 0x9a, 0x4b, 0x0e, 0x76, 0x6d, 0xdf, 0x69, 0xe2, 0x01, 0xb1, 0xcf, 0xb5, + 0x4c, 0x6f, 0xc7, 0x6f, 0x54, 0x9a, 0x76, 0x67, 0xa9, 0x65, 0xb7, 0xec, 0x50, 0x3e, 0xf9, 0xa2, + 0x1f, 0xf4, 0x7f, 0x9c, 0xfc, 0x35, 0xd3, 0xf2, 0xb0, 0x63, 0xe9, 0xed, 0x25, 0xb7, 0xb9, 0x83, + 0x0d, 0xbf, 0x8d, 0x9d, 0xf0, 0x7f, 0x76, 0xe3, 0x1e, 0x6e, 0x7a, 0xee, 0x00, 0x80, 0xf1, 0x96, + 0x7f, 0x3d, 0x0b, 0x13, 0xab, 0x64, 0x69, 0x36, 0xf1, 0x87, 0x3e, 0xb6, 0x9a, 0x18, 0x5d, 0x82, + 0xb1, 0x0f, 0x7d, 0xec, 0xe3, 0xa2, 0xb6, 0xa8, 0x5d, 0xcc, 0x55, 0xa7, 0xfb, 0xbd, 0xd2, 0x14, + 0x05, 0x3c, 0x6b, 0x77, 0x4c, 0x0f, 0x77, 0xba, 0xde, 0x41, 0x8d, 0x51, 0xa0, 0xd7, 0x60, 0xfc, + 0x9e, 0xdd, 0xa8, 0xbb, 0xd8, 0xab, 0x5b, 0x7a, 0x07, 0x17, 0x13, 0x94, 0xa3, 0xd8, 0xef, 0x95, + 0x66, 0xee, 0xd9, 0x8d, 0x4d, 0xec, 0xdd, 0xd2, 0x3b, 0x32, 0x1b, 0x84, 0x50, 0xf4, 0x1c, 0x64, + 0x7c, 0x17, 0x3b, 0x75, 0xd3, 0x28, 0x26, 0x29, 0xdb, 0x4c, 0xbf, 0x57, 0x2a, 0x10, 0xd0, 0x0d, + 0x43, 0x62, 0x49, 0x33, 0x08, 0x7a, 0x16, 0xd2, 0x2d, 0xc7, 0xf6, 0xbb, 0x6e, 0x31, 0xb5, 0x98, + 0x14, 0xd4, 0x0c, 0x22, 0x53, 0x33, 0x08, 0xba, 0x0d, 0x69, 0x66, 0xef, 0xe2, 0xd8, 0x62, 0xf2, + 0x62, 0x7e, 0xf9, 0x7c, 0x45, 0x76, 0x82, 0x8a, 0x32, 0x61, 0xf6, 0xc5, 0x04, 0x32, 0xbc, 0x2c, + 0x90, 0xbb, 0xcd, 0x7f, 0x4e, 0xc3, 0x18, 0xa5, 0x43, 0xb7, 0x21, 0xd3, 0x74, 0x30, 0x31, 0x56, + 0x11, 0x2d, 0x6a, 0x17, 0xf3, 0xcb, 0xf3, 0x15, 0xe6, 0x04, 0x15, 0x61, 0xa4, 0xca, 0x1d, 0xe1, + 0x04, 0xd5, 0xd3, 0xfd, 0x5e, 0xe9, 0x24, 0x27, 0x0f, 0xa5, 0xde, 0xff, 0x65, 0x49, 0xab, 0x09, + 0x29, 0x68, 0x03, 0x72, 0xae, 0xdf, 
0xe8, 0x98, 0xde, 0x9a, 0xdd, 0xa0, 0x6b, 0x9e, 0x5f, 0x3e, + 0xa5, 0x0e, 0x77, 0x53, 0xa0, 0xab, 0xa7, 0xfa, 0xbd, 0xd2, 0x74, 0x40, 0x1d, 0x4a, 0xbc, 0x7e, + 0xa2, 0x16, 0x0a, 0x41, 0x3b, 0x30, 0xe5, 0xe0, 0xae, 0x63, 0xda, 0x8e, 0xe9, 0x99, 0x2e, 0x26, + 0x72, 0x13, 0x54, 0xee, 0x39, 0x55, 0x6e, 0x4d, 0x25, 0xaa, 0x9e, 0xeb, 0xf7, 0x4a, 0xa7, 0x23, + 0x9c, 0x8a, 0x8e, 0xa8, 0x58, 0xe4, 0x01, 0x8a, 0x80, 0x36, 0xb1, 0x47, 0xed, 0x99, 0x5f, 0x5e, + 0x7c, 0xa0, 0xb2, 0x4d, 0xec, 0x55, 0x17, 0xfb, 0xbd, 0xd2, 0xd9, 0x41, 0x7e, 0x45, 0x65, 0x8c, + 0x7c, 0xd4, 0x86, 0x82, 0x0c, 0x35, 0xc8, 0x04, 0x53, 0x54, 0xe7, 0xc2, 0x70, 0x9d, 0x84, 0xaa, + 0xba, 0xd0, 0xef, 0x95, 0xe6, 0xa3, 0xbc, 0x8a, 0xbe, 0x01, 0xc9, 0xc4, 0x3e, 0x4d, 0xdd, 0x6a, + 0xe2, 0x36, 0x51, 0x33, 0x16, 0x67, 0x9f, 0x2b, 0x02, 0xcd, 0xec, 0x13, 0x50, 0xab, 0xf6, 0x09, + 0xc0, 0xe8, 0x7d, 0x18, 0x0f, 0x3e, 0xc8, 0x7a, 0xa5, 0xb9, 0x1f, 0xc5, 0x0b, 0x25, 0x2b, 0x35, + 0xdf, 0xef, 0x95, 0xe6, 0x64, 0x1e, 0x45, 0xb4, 0x22, 0x2d, 0x94, 0xde, 0x66, 0x2b, 0x93, 0x19, + 0x2e, 0x9d, 0x51, 0xc8, 0xd2, 0xdb, 0x83, 0x2b, 0xa2, 0x48, 0x23, 0xd2, 0xc9, 0x26, 0xf6, 0x9b, + 0x4d, 0x8c, 0x0d, 0x6c, 0x14, 0xb3, 0x71, 0xd2, 0xd7, 0x24, 0x0a, 0x26, 0x5d, 0xe6, 0x51, 0xa5, + 0xcb, 0x18, 0xb2, 0xd6, 0xf7, 0xec, 0xc6, 0xaa, 0xe3, 0xd8, 0x8e, 0x5b, 0xcc, 0xc5, 0xad, 0xf5, + 0x9a, 0x40, 0xb3, 0xb5, 0x0e, 0xa8, 0xd5, 0xb5, 0x0e, 0xc0, 0x7c, 0xbc, 0x35, 0xdf, 0xba, 0x89, + 0x75, 0x17, 0x1b, 0x45, 0x18, 0x32, 0xde, 0x80, 0x22, 0x18, 0x6f, 0x00, 0x19, 0x18, 0x6f, 0x80, + 0x41, 0x06, 0x4c, 0xb2, 0xef, 0x15, 0xd7, 0x35, 0x5b, 0x16, 0x36, 0x8a, 0x79, 0x2a, 0xff, 0x6c, + 0x9c, 0x7c, 0x41, 0x53, 0x3d, 0xdb, 0xef, 0x95, 0x8a, 0x2a, 0x9f, 0xa2, 0x23, 0x22, 0x13, 0x7d, + 0x00, 0x13, 0x0c, 0x52, 0xf3, 0x2d, 0xcb, 0xb4, 0x5a, 0xc5, 0x71, 0xaa, 0xe4, 0x4c, 0x9c, 0x12, + 0x4e, 0x52, 0x3d, 0xd3, 0xef, 0x95, 0x4e, 0x29, 0x5c, 0x8a, 0x0a, 0x55, 0x20, 0x89, 0x18, 0x0c, + 0x10, 0x1a, 0x76, 0x22, 0x2e, 0x62, 0xac, 0xa9, 0x44, 0x2c, 0x62, 0x44, 0x38, 0xd5, 0x88, 0x11, + 0x41, 0x86, 0xf6, 0xe0, 0x46, 0x9e, 0x1c, 0x6e, 0x0f, 0x6e, 0x67, 0xc9, 0x1e, 0x31, 0xa6, 0x56, + 0xa4, 0xa1, 0x4f, 0x34, 0x98, 0x75, 0x3d, 0xdd, 0x32, 0xf4, 0xb6, 0x6d, 0xe1, 0x1b, 0x56, 0xcb, + 0xc1, 0xae, 0x7b, 0xc3, 0xda, 0xb6, 0x8b, 0x05, 0xaa, 0xe7, 0x42, 0x24, 0xb0, 0xc6, 0x91, 0x56, + 0x2f, 0xf4, 0x7b, 0xa5, 0x52, 0xac, 0x14, 0x45, 0x73, 0xbc, 0x22, 0xb4, 0x0f, 0xd3, 0xe2, 0x5c, + 0xdf, 0xf2, 0xcc, 0xb6, 0xe9, 0xea, 0x9e, 0x69, 0x5b, 0xc5, 0x93, 0x54, 0xff, 0xf9, 0x68, 0x7c, + 0x1a, 0x20, 0xac, 0x9e, 0xef, 0xf7, 0x4a, 0xe7, 0x62, 0x24, 0x28, 0xba, 0xe3, 0x54, 0x84, 0x46, + 0xdc, 0x70, 0x30, 0x21, 0xc4, 0x46, 0x71, 0x7a, 0xb8, 0x11, 0x03, 0x22, 0xd9, 0x88, 0x01, 0x30, + 0xce, 0x88, 0x01, 0x92, 0x68, 0xea, 0xea, 0x8e, 0x67, 0x12, 0xb5, 0xeb, 0xba, 0xb3, 0x8b, 0x9d, + 0xe2, 0x4c, 0x9c, 0xa6, 0x0d, 0x95, 0x88, 0x69, 0x8a, 0x70, 0xaa, 0x9a, 0x22, 0x48, 0x74, 0x5f, + 0x03, 0x75, 0x68, 0xa6, 0x6d, 0xd5, 0xc8, 0xc1, 0xed, 0x92, 0xe9, 0xcd, 0x52, 0xa5, 0x4f, 0x3f, + 0x60, 0x7a, 0x32, 0x79, 0xf5, 0xe9, 0x7e, 0xaf, 0x74, 0x61, 0xa8, 0x34, 0x65, 0x20, 0xc3, 0x95, + 0xa2, 0xbb, 0x90, 0x27, 0x48, 0x4c, 0x53, 0x20, 0xa3, 0x38, 0x47, 0xc7, 0x70, 0x7a, 0x70, 0x0c, + 0x9c, 0x80, 0xe6, 0x00, 0xb3, 0x12, 0x87, 0xa2, 0x47, 0x16, 0x85, 0x3e, 0xd3, 0x80, 0x38, 0x7a, + 0xdc, 0x4c, 0x4f, 0x51, 0x2d, 0x4f, 0x0d, 0x68, 0x89, 0x9b, 0xe6, 0x53, 0xfd, 0x5e, 0x69, 0x31, + 0x5e, 0x8e, 0xa2, 0x7b, 0x88, 0xae, 0xd0, 0x8f, 0x82, 0x43, 0xa2, 0x58, 0x1c, 0xee, 0x47, 0x01, + 0x91, 0xec, 0x47, 0x01, 0x30, 0xce, 0x8f, 0x02, 0x24, 0x0f, 
0x06, 0xef, 0xea, 0x6d, 0xd3, 0xa0, + 0x09, 0xd5, 0xe9, 0x21, 0xc1, 0x20, 0xa0, 0x08, 0x82, 0x41, 0x00, 0x19, 0x08, 0x06, 0x21, 0x6d, + 0x06, 0xc6, 0xa8, 0x88, 0xf2, 0xcf, 0x33, 0x30, 0x1d, 0xb3, 0xd5, 0xd0, 0x1b, 0x90, 0x76, 0x7c, + 0x8b, 0x64, 0xa0, 0x2c, 0xed, 0x42, 0xaa, 0xe2, 0x2d, 0xdf, 0x34, 0x58, 0xfa, 0xeb, 0xf8, 0x96, + 0x92, 0x94, 0x8e, 0x51, 0x00, 0xe1, 0x27, 0xe9, 0xaf, 0x69, 0xf0, 0xf4, 0x6a, 0x28, 0xff, 0x3d, + 0xbb, 0xa1, 0xf2, 0x53, 0x00, 0xc2, 0x30, 0x21, 0xf6, 0x71, 0xdd, 0x24, 0x41, 0x2a, 0x19, 0x67, + 0xe5, 0xb7, 0xfd, 0x06, 0x76, 0x2c, 0xec, 0x61, 0x57, 0xcc, 0x81, 0x46, 0x29, 0xba, 0x12, 0x8e, + 0x04, 0x91, 0xe4, 0x8f, 0xcb, 0x70, 0xf4, 0x0f, 0x1a, 0x14, 0x3b, 0xfa, 0x7e, 0x5d, 0x00, 0xdd, + 0xfa, 0xb6, 0xed, 0xd4, 0xbb, 0xd8, 0x31, 0x6d, 0x83, 0x66, 0xd3, 0xf9, 0xe5, 0xbf, 0x78, 0x68, + 0x5c, 0xaa, 0xac, 0xeb, 0xfb, 0x02, 0xec, 0xbe, 0x65, 0x3b, 0x1b, 0x94, 0x7d, 0xd5, 0xf2, 0x9c, + 0x83, 0xea, 0xb9, 0xaf, 0x7a, 0xa5, 0x13, 0xc4, 0xcb, 0x3b, 0x71, 0x34, 0xb5, 0x78, 0x30, 0xfa, + 0x7b, 0x0d, 0xe6, 0x3c, 0xdb, 0xd3, 0xdb, 0xf5, 0xa6, 0xdf, 0xf1, 0xdb, 0xba, 0x67, 0xee, 0xe1, + 0xba, 0xef, 0xea, 0x2d, 0xcc, 0x93, 0xf6, 0xd7, 0x1f, 0x3e, 0xa8, 0x3b, 0x84, 0xff, 0x4a, 0xc0, + 0xbe, 0x45, 0xb8, 0xd9, 0x98, 0xce, 0xf2, 0x31, 0xcd, 0x78, 0x31, 0x24, 0xb5, 0x58, 0x28, 0xba, + 0x0c, 0xc0, 0xec, 0x59, 0x77, 0x3d, 0x87, 0x66, 0x65, 0xb9, 0xea, 0x5c, 0xbf, 0x57, 0x42, 0xd4, + 0x5c, 0x9b, 0x9e, 0x14, 0xab, 0x6a, 0x59, 0x01, 0x23, 0x5c, 0xcc, 0x8b, 0x28, 0x57, 0x26, 0xe4, + 0xa2, 0x4e, 0x12, 0xe1, 0x12, 0xb0, 0xf9, 0x7f, 0xd2, 0x60, 0x7e, 0xf8, 0x92, 0xa2, 0x0b, 0x90, + 0xdc, 0xc5, 0x07, 0xbc, 0x04, 0x3b, 0xd9, 0xef, 0x95, 0x26, 0x76, 0xf1, 0x81, 0x24, 0x88, 0x60, + 0xd1, 0x5f, 0xc1, 0xd8, 0x9e, 0xde, 0xf6, 0x31, 0x77, 0xbf, 0x4a, 0x85, 0x15, 0x9b, 0x15, 0xb9, + 0xd8, 0xac, 0x74, 0x77, 0x5b, 0x04, 0x50, 0x11, 0xd6, 0xaf, 0xbc, 0xe3, 0xeb, 0x96, 0x67, 0x7a, + 0x07, 0xcc, 0x35, 0xa9, 0x00, 0xd9, 0x35, 0x29, 0xe0, 0xb5, 0xc4, 0x2b, 0xda, 0xfc, 0x17, 0x1a, + 0x9c, 0x1e, 0xba, 0xc0, 0xdf, 0x87, 0x11, 0x96, 0xeb, 0x90, 0x22, 0x9b, 0x8c, 0x14, 0x87, 0x3b, + 0x66, 0x6b, 0xe7, 0xe5, 0xcb, 0x74, 0x38, 0x69, 0x56, 0xcb, 0x31, 0x88, 0x5c, 0xcb, 0x31, 0x08, + 0x29, 0x70, 0xdb, 0xf6, 0x47, 0x2f, 0x5f, 0xa6, 0x83, 0x4a, 0x33, 0x25, 0x14, 0x20, 0x2b, 0xa1, + 0x80, 0x72, 0x3f, 0x0d, 0xb9, 0xa0, 0xfa, 0x92, 0xf6, 0xbb, 0x76, 0xa8, 0xfd, 0x7e, 0x1d, 0x0a, + 0x06, 0x36, 0xfc, 0x6e, 0xdb, 0x6c, 0x52, 0xf7, 0x15, 0x91, 0x23, 0xc7, 0x42, 0xa7, 0x82, 0x53, + 0xf8, 0xa7, 0x22, 0x28, 0xb4, 0x0c, 0x59, 0x5e, 0xa5, 0x1c, 0xd0, 0xa0, 0x31, 0xc1, 0x3c, 0x4e, + 0xc0, 0x64, 0x8f, 0x13, 0x30, 0x54, 0x03, 0x60, 0xa5, 0xff, 0x3a, 0xf6, 0x74, 0x5e, 0x2f, 0x15, + 0xd5, 0x19, 0xdc, 0x0e, 0xf0, 0xac, 0x88, 0x0f, 0xe9, 0xe5, 0x22, 0x3e, 0x84, 0xa2, 0xf7, 0x01, + 0x3a, 0xba, 0x69, 0x31, 0x3e, 0x5e, 0x1c, 0x95, 0x87, 0x85, 0xaf, 0xf5, 0x80, 0x92, 0x49, 0x0f, + 0x39, 0x65, 0xe9, 0x21, 0x94, 0x94, 0xda, 0xbc, 0x59, 0x51, 0x4c, 0xd3, 0x88, 0xb0, 0x30, 0x4c, + 0x34, 0x17, 0x3b, 0x4b, 0xca, 0x6d, 0xce, 0x22, 0xc9, 0x14, 0x52, 0xc8, 0xb2, 0xb5, 0xcd, 0x6d, + 0xec, 0x99, 0x1d, 0x4c, 0x37, 0x2a, 0x5f, 0x36, 0x01, 0x93, 0x97, 0x4d, 0xc0, 0xd0, 0x2b, 0x00, + 0xba, 0xb7, 0x6e, 0xbb, 0xde, 0x6d, 0xab, 0x89, 0x69, 0xb9, 0x93, 0x65, 0xc3, 0x0f, 0xa1, 0xf2, + 0xf0, 0x43, 0x28, 0x7a, 0x1d, 0xf2, 0x5d, 0x7e, 0xbc, 0x36, 0xda, 0x98, 0x96, 0x33, 0x59, 0x96, + 0x0d, 0x48, 0x60, 0x89, 0x57, 0xa6, 0x46, 0xd7, 0x60, 0xaa, 0x69, 0x5b, 0x4d, 0xdf, 0x71, 0xb0, + 0xd5, 0x3c, 0xd8, 0xd4, 0xb7, 0x31, 0x2d, 0x5d, 0xb2, 0xcc, 0x55, 0x22, 0x28, 0xd9, 
0x55, 0x22, + 0x28, 0xf4, 0x12, 0xe4, 0x82, 0xd6, 0x0f, 0xad, 0x4e, 0x72, 0xbc, 0x8b, 0x20, 0x80, 0x12, 0x73, + 0x48, 0x49, 0x06, 0x6f, 0xba, 0x57, 0xb9, 0xd3, 0x61, 0x5a, 0x71, 0xf0, 0xc1, 0x4b, 0x60, 0x79, + 0xf0, 0x12, 0x38, 0x12, 0x48, 0x27, 0x47, 0x0b, 0xa4, 0xe5, 0xff, 0xd7, 0x60, 0x26, 0xce, 0x5b, + 0x22, 0x9e, 0xab, 0x3d, 0x16, 0xcf, 0x7d, 0x17, 0xb2, 0x5d, 0xdb, 0xa8, 0xbb, 0x5d, 0xdc, 0xe4, + 0xc1, 0x29, 0xe2, 0xb7, 0x1b, 0xb6, 0xb1, 0xd9, 0xc5, 0xcd, 0xbf, 0x34, 0xbd, 0x9d, 0x95, 0x3d, + 0xdb, 0x34, 0x6e, 0x9a, 0x2e, 0x77, 0xb0, 0x2e, 0xc3, 0x28, 0x99, 0x47, 0x86, 0x03, 0xab, 0x59, + 0x48, 0x33, 0x2d, 0xe5, 0x9f, 0x25, 0xa1, 0x10, 0xf5, 0xd0, 0x1f, 0xd2, 0x54, 0xd0, 0x5d, 0xc8, + 0x98, 0xac, 0xb0, 0xe1, 0x89, 0xc9, 0x9f, 0x49, 0xe1, 0xbb, 0x12, 0x36, 0x46, 0x2b, 0x7b, 0x2f, + 0x54, 0x78, 0x05, 0x44, 0x97, 0x80, 0x4a, 0xe6, 0x9c, 0xaa, 0x64, 0x0e, 0x44, 0x35, 0xc8, 0xb8, + 0xd8, 0xd9, 0x33, 0x9b, 0x98, 0xc7, 0xa1, 0x92, 0x2c, 0xb9, 0x69, 0x3b, 0x98, 0xc8, 0xdc, 0x64, + 0x24, 0xa1, 0x4c, 0xce, 0xa3, 0xca, 0xe4, 0x40, 0xf4, 0x2e, 0xe4, 0x9a, 0xb6, 0xb5, 0x6d, 0xb6, + 0xd6, 0xf5, 0x2e, 0x8f, 0x44, 0xe7, 0xe2, 0xa4, 0x5e, 0x11, 0x44, 0xbc, 0x59, 0x23, 0x3e, 0x23, + 0xcd, 0x9a, 0x80, 0x2a, 0x34, 0xe8, 0x1f, 0x52, 0x00, 0xa1, 0x71, 0xd0, 0xab, 0x90, 0xc7, 0xfb, + 0xb8, 0xe9, 0x7b, 0xb6, 0x23, 0x8e, 0x04, 0xde, 0xfb, 0x14, 0x60, 0x25, 0x86, 0x43, 0x08, 0x25, + 0x7b, 0xd2, 0xd2, 0x3b, 0xd8, 0xed, 0xea, 0x4d, 0xd1, 0x34, 0xa5, 0x83, 0x09, 0x80, 0xf2, 0x9e, + 0x0c, 0x80, 0xe8, 0xcf, 0x21, 0x45, 0xdb, 0xac, 0xac, 0x5f, 0x8a, 0xfa, 0xbd, 0xd2, 0xa4, 0xa5, + 0x36, 0x58, 0x29, 0x1e, 0xbd, 0x09, 0x13, 0xbb, 0x81, 0xe3, 0x91, 0xb1, 0xa5, 0x28, 0x03, 0xcd, + 0x18, 0x43, 0x84, 0x32, 0xba, 0x71, 0x19, 0x8e, 0xb6, 0x21, 0xaf, 0x5b, 0x96, 0xed, 0xd1, 0xe3, + 0x46, 0xf4, 0x50, 0x2f, 0x0d, 0x73, 0xd3, 0xca, 0x4a, 0x48, 0xcb, 0x92, 0x2f, 0x1a, 0x27, 0x24, + 0x09, 0x72, 0x9c, 0x90, 0xc0, 0xa8, 0x06, 0xe9, 0xb6, 0xde, 0xc0, 0x6d, 0x11, 0xdf, 0x9f, 0x1a, + 0xaa, 0xe2, 0x26, 0x25, 0x63, 0xd2, 0xe9, 0xe9, 0xce, 0xf8, 0xe4, 0xd3, 0x9d, 0x41, 0xe6, 0xb7, + 0xa1, 0x10, 0x1d, 0xcf, 0x68, 0xb9, 0xca, 0x25, 0x39, 0x57, 0xc9, 0x3d, 0x34, 0x3b, 0xd2, 0x21, + 0x2f, 0x0d, 0xea, 0x38, 0x54, 0x94, 0xff, 0x45, 0x83, 0x99, 0xb8, 0xbd, 0x8b, 0xd6, 0xa5, 0x1d, + 0xaf, 0xf1, 0x5e, 0x50, 0x8c, 0xab, 0x73, 0xde, 0x21, 0x5b, 0x3d, 0xdc, 0xe8, 0x55, 0x98, 0xb4, + 0x6c, 0x03, 0xd7, 0x75, 0xa2, 0xa0, 0x6d, 0xba, 0x5e, 0x31, 0x41, 0x7b, 0xec, 0xb4, 0x87, 0x44, + 0x30, 0x2b, 0x02, 0x21, 0x71, 0x4f, 0x28, 0x88, 0xf2, 0x7f, 0x69, 0x30, 0x15, 0x69, 0xf1, 0x1e, + 0x39, 0x5f, 0x92, 0xb3, 0x9c, 0xc4, 0x88, 0x59, 0x8e, 0x7a, 0xf4, 0x24, 0x47, 0x3c, 0x7a, 0xfe, + 0x2f, 0x01, 0x79, 0xa9, 0x66, 0x3f, 0xf2, 0xc8, 0xef, 0xc1, 0x14, 0x3f, 0x4a, 0x4d, 0xab, 0xc5, + 0x6a, 0xbb, 0x04, 0x6f, 0x40, 0x0d, 0xdc, 0xc3, 0xac, 0xd9, 0x8d, 0xcd, 0x80, 0x96, 0x96, 0x76, + 0xb4, 0x3f, 0xe8, 0x2a, 0x30, 0x49, 0xc5, 0xa4, 0x8a, 0x41, 0x77, 0x61, 0xce, 0xef, 0x92, 0x8a, + 0xb7, 0xee, 0xf2, 0x1b, 0x8d, 0xba, 0xe5, 0x77, 0x1a, 0x98, 0xcd, 0x7e, 0xac, 0x5a, 0xee, 0xf7, + 0x4a, 0x0b, 0x8c, 0x42, 0x5c, 0x79, 0xdc, 0xa2, 0x78, 0x49, 0xe6, 0x4c, 0x1c, 0x3e, 0xb2, 0x96, + 0xa9, 0x11, 0xd7, 0xf2, 0x3a, 0xa0, 0xc1, 0x5e, 0xbf, 0x62, 0x4b, 0x6d, 0x34, 0x5b, 0x96, 0xff, + 0x5b, 0x83, 0x42, 0xb4, 0x85, 0xff, 0x03, 0x72, 0xaa, 0x7f, 0xd7, 0x20, 0x17, 0x74, 0xf1, 0x8f, + 0x3c, 0xee, 0x67, 0x21, 0xed, 0x60, 0xdd, 0xb5, 0x2d, 0x1e, 0x3c, 0x68, 0x14, 0x64, 0x10, 0x39, + 0x0a, 0x32, 0xc8, 0x21, 0x47, 0x7c, 0x07, 0xc6, 0x99, 0xb9, 0xde, 0x32, 0xdb, 0x1e, 0x76, 0xd0, + 0x55, 0x48, 
0xbb, 0x9e, 0xee, 0x61, 0xb7, 0xa8, 0x2d, 0x26, 0x2f, 0x4e, 0x2e, 0xcf, 0x0d, 0xb6, + 0xf9, 0x09, 0x9a, 0x8d, 0x85, 0x51, 0xca, 0x63, 0x61, 0x90, 0xf2, 0xdf, 0x68, 0x30, 0x2e, 0xdf, + 0x66, 0x3c, 0x1e, 0xb1, 0x8f, 0xb6, 0x20, 0xe5, 0xff, 0x08, 0x06, 0xd1, 0x7e, 0x3c, 0x7e, 0xf4, + 0x5d, 0xd8, 0xe3, 0xf7, 0x1a, 0x33, 0x48, 0xd0, 0x3d, 0x3f, 0xea, 0xa0, 0x5b, 0x61, 0xc7, 0x89, + 0xc4, 0x0e, 0x97, 0x06, 0xfa, 0x51, 0x3b, 0x4e, 0xf4, 0x38, 0x50, 0xd8, 0xe5, 0xe3, 0x40, 0x41, + 0x1c, 0x72, 0xbe, 0x9f, 0xa4, 0xe9, 0x7c, 0xc3, 0xfb, 0x95, 0x27, 0xdd, 0xa1, 0x8b, 0xe4, 0x78, + 0xc9, 0x47, 0xc8, 0xf1, 0x9e, 0x83, 0x0c, 0x3d, 0x54, 0x83, 0xf4, 0x8b, 0x3a, 0x08, 0x01, 0xa9, + 0xf7, 0xdb, 0x0c, 0xf2, 0x80, 0x28, 0x3e, 0x76, 0xc4, 0x28, 0x5e, 0x87, 0xd3, 0x3b, 0xba, 0x5b, + 0x17, 0xe7, 0x8e, 0x51, 0xd7, 0xbd, 0x7a, 0x10, 0x01, 0xd3, 0xb4, 0xae, 0xa3, 0x1d, 0xe3, 0x1d, + 0xdd, 0xdd, 0x14, 0x34, 0x2b, 0xde, 0xc6, 0x60, 0x3c, 0x9c, 0x8b, 0xa7, 0x40, 0x5b, 0x30, 0x1b, + 0x2f, 0x3c, 0x43, 0x47, 0x4e, 0x2f, 0x34, 0xdc, 0x07, 0x4a, 0x9e, 0x8e, 0x41, 0xa3, 0x4f, 0x35, + 0x28, 0x92, 0x2c, 0xc7, 0xc1, 0x1f, 0xfa, 0xa6, 0x83, 0x3b, 0xc4, 0x62, 0x75, 0x7b, 0x0f, 0x3b, + 0x6d, 0xfd, 0x80, 0xdf, 0x0d, 0x9e, 0x1f, 0x3c, 0x4d, 0x37, 0x6c, 0xa3, 0x26, 0x31, 0xb0, 0xa9, + 0x75, 0x55, 0xe0, 0x6d, 0x26, 0x44, 0x9e, 0x5a, 0x3c, 0x45, 0xc4, 0x8d, 0xe1, 0x50, 0x1d, 0xc1, + 0xfc, 0x68, 0x1d, 0xc1, 0xb5, 0x54, 0x36, 0x5b, 0xc8, 0x95, 0x7f, 0x9b, 0x80, 0x49, 0xf5, 0xba, + 0xf0, 0x89, 0x6f, 0x82, 0x81, 0xa0, 0x91, 0xfc, 0x4e, 0x82, 0x46, 0xea, 0x50, 0xab, 0x3d, 0x36, + 0xda, 0x6a, 0x97, 0x7f, 0x93, 0x80, 0x09, 0xe5, 0xc6, 0xf4, 0xa7, 0x65, 0x3e, 0x8e, 0x65, 0xfe, + 0xc7, 0x04, 0xcc, 0xc5, 0x0f, 0xf9, 0x58, 0x5a, 0x21, 0xd7, 0x81, 0x14, 0x35, 0x37, 0xc2, 0x7c, + 0x7b, 0x76, 0xa0, 0x13, 0x42, 0x97, 0x4b, 0x54, 0x44, 0x03, 0x97, 0xba, 0x82, 0x1d, 0xdd, 0x85, + 0xbc, 0x29, 0x5d, 0x1f, 0x27, 0xe3, 0x6e, 0xf9, 0xe4, 0x4b, 0x63, 0xd6, 0x1a, 0x1b, 0x72, 0x55, + 0x2c, 0x8b, 0xaa, 0xa6, 0x21, 0x45, 0x0a, 0x82, 0xf2, 0x1e, 0x64, 0xf8, 0x70, 0xd0, 0x8b, 0x90, + 0xa3, 0x67, 0x05, 0xad, 0xee, 0xb5, 0x70, 0x69, 0x09, 0x30, 0xf2, 0x84, 0x2a, 0x2b, 0x60, 0xe8, + 0x65, 0x00, 0x12, 0x1e, 0xf9, 0x29, 0x91, 0xa0, 0xb1, 0x96, 0x76, 0x11, 0xba, 0xb6, 0x31, 0x70, + 0x34, 0xe4, 0x02, 0x60, 0xf9, 0xcb, 0x04, 0xe4, 0xe5, 0x0b, 0xeb, 0x43, 0x29, 0xff, 0x18, 0x44, + 0x87, 0xa7, 0xae, 0x1b, 0x06, 0xf9, 0x17, 0x8b, 0x64, 0x62, 0x69, 0xe8, 0x22, 0x89, 0xff, 0xaf, + 0x08, 0x0e, 0x56, 0xcf, 0xd3, 0x47, 0x39, 0x66, 0x04, 0x25, 0x69, 0x2d, 0x44, 0x71, 0xf3, 0xbb, + 0x30, 0x1b, 0x2b, 0x4a, 0xae, 0xc2, 0xc7, 0x1e, 0x57, 0x15, 0xfe, 0x65, 0x1a, 0x66, 0x63, 0x1f, + 0x0a, 0x3c, 0xf1, 0x88, 0xa1, 0xee, 0xa0, 0xe4, 0x63, 0xd9, 0x41, 0x7f, 0xab, 0xc5, 0x59, 0x96, + 0xdd, 0x12, 0xbe, 0x3a, 0xc2, 0xeb, 0x89, 0xc7, 0x65, 0x63, 0xd5, 0x2d, 0xc7, 0x0e, 0xb5, 0x27, + 0xd2, 0xa3, 0xee, 0x09, 0xf4, 0x3c, 0x6b, 0xa8, 0x50, 0x5d, 0xec, 0x06, 0x4f, 0x44, 0x88, 0x88, + 0xaa, 0x0c, 0x07, 0xa1, 0x37, 0x61, 0x42, 0x70, 0xb0, 0x36, 0x5e, 0x36, 0xec, 0xb1, 0x71, 0x9a, + 0x68, 0x27, 0x6f, 0x5c, 0x86, 0x47, 0xa2, 0x70, 0xee, 0x50, 0x51, 0x18, 0x46, 0xbc, 0x6c, 0xfc, + 0x4e, 0xf7, 0xcb, 0xef, 0x12, 0x30, 0x15, 0x79, 0x27, 0xf4, 0xd3, 0xd9, 0x7a, 0x1c, 0x67, 0xeb, + 0xff, 0x6a, 0x90, 0x0b, 0x9e, 0xc3, 0x1d, 0xb9, 0x34, 0x5c, 0x81, 0x34, 0x66, 0x4f, 0xb2, 0x58, + 0x18, 0x9f, 0x8e, 0x3c, 0x99, 0x25, 0x38, 0xfe, 0x48, 0x36, 0xf2, 0x0a, 0xab, 0xc6, 0x19, 0x0f, + 0x59, 0xf4, 0xfd, 0x4f, 0x42, 0x14, 0x7d, 0xe1, 0x4c, 0x9e, 0xa8, 0xb3, 0x84, 0x2b, 0x91, 0x7c, + 0x3c, 0x2b, 0x71, 0xbc, 0x6e, 0xf0, 
0xcf, 0x00, 0x63, 0x74, 0x4c, 0x68, 0x19, 0xb2, 0x1e, 0x76, + 0x3a, 0xa6, 0xa5, 0xb7, 0xe9, 0xd2, 0x65, 0x19, 0xb7, 0x80, 0xc9, 0xdc, 0x02, 0x86, 0x76, 0x60, + 0x2a, 0x6c, 0xfd, 0x53, 0x31, 0xf1, 0x6f, 0x85, 0xdf, 0x56, 0x89, 0xd8, 0x35, 0x64, 0x84, 0x53, + 0x7d, 0xec, 0x13, 0x41, 0x22, 0x03, 0x26, 0x9b, 0xb6, 0xe5, 0xe9, 0xa6, 0x85, 0x1d, 0xa6, 0x28, + 0x19, 0xf7, 0x56, 0xf2, 0x8a, 0x42, 0xc3, 0x7a, 0xa1, 0x2a, 0x9f, 0xfa, 0x56, 0x52, 0xc5, 0xa1, + 0x0f, 0x60, 0x42, 0x14, 0xe1, 0x4c, 0x49, 0x2a, 0xee, 0xad, 0xe4, 0xaa, 0x4c, 0xc2, 0x36, 0xb8, + 0xc2, 0xa5, 0xbe, 0x95, 0x54, 0x50, 0xa8, 0x0d, 0x85, 0xae, 0x6d, 0x6c, 0x59, 0xbc, 0xf4, 0xd4, + 0x1b, 0x6d, 0xcc, 0xef, 0x9b, 0x16, 0x06, 0x92, 0x4d, 0x85, 0x8a, 0x1d, 0x82, 0x51, 0x5e, 0xf5, + 0xf5, 0x71, 0x14, 0x8b, 0xde, 0x87, 0xf1, 0x36, 0xd6, 0x5d, 0xbc, 0xba, 0xdf, 0x35, 0x1d, 0x6c, + 0xc4, 0xbf, 0x15, 0xbe, 0x29, 0x51, 0xb0, 0x23, 0x48, 0xe6, 0x51, 0x9f, 0x48, 0xc9, 0x18, 0x62, + 0xfd, 0x8e, 0xbe, 0x5f, 0xf3, 0x2d, 0x77, 0x75, 0x9f, 0xbf, 0xfb, 0xcc, 0xc4, 0x59, 0x7f, 0x5d, + 0x25, 0x62, 0xd6, 0x8f, 0x70, 0xaa, 0xd6, 0x8f, 0x20, 0xd1, 0x4d, 0x7a, 0xc2, 0x32, 0x93, 0xb0, + 0x37, 0xc3, 0x73, 0x03, 0xab, 0xc5, 0xac, 0xc1, 0xda, 0xb1, 0xfc, 0x4b, 0x11, 0x1a, 0x48, 0xe0, + 0x36, 0xa0, 0xd3, 0xae, 0x61, 0xcf, 0x77, 0x2c, 0x6c, 0xf0, 0x96, 0xc0, 0xa0, 0x0d, 0x14, 0xaa, + 0xc0, 0x06, 0x0a, 0x74, 0xc0, 0x06, 0x0a, 0x96, 0xf8, 0x54, 0xd7, 0x36, 0xee, 0xb0, 0x2d, 0xe3, + 0x05, 0x8f, 0x88, 0xcf, 0x0c, 0xa8, 0x0a, 0x49, 0x98, 0x4f, 0x29, 0x5c, 0xaa, 0x4f, 0x29, 0x28, + 0xf4, 0x31, 0xcc, 0x44, 0xde, 0x58, 0xb2, 0x95, 0xca, 0xc7, 0x5d, 0xe7, 0xae, 0xc5, 0x50, 0xb2, + 0xfe, 0x50, 0x9c, 0x0c, 0x45, 0x6d, 0xac, 0x16, 0xa2, 0xbd, 0xa5, 0x5b, 0xad, 0x35, 0xbb, 0xa1, + 0x7a, 0xf5, 0x78, 0x9c, 0xf6, 0x6b, 0x31, 0x94, 0x4c, 0x7b, 0x9c, 0x0c, 0x55, 0x7b, 0x1c, 0x45, + 0xf0, 0x9e, 0x92, 0xa4, 0xa0, 0xc1, 0xbb, 0xe3, 0xb8, 0xf7, 0x94, 0x8c, 0x40, 0x7a, 0x4f, 0xc9, + 0x00, 0x31, 0xef, 0x29, 0x39, 0x65, 0x56, 0xb4, 0x68, 0xcb, 0x5f, 0x68, 0x30, 0x15, 0x89, 0x60, + 0xe8, 0x0d, 0x08, 0x9e, 0xc9, 0xdd, 0x39, 0xe8, 0x8a, 0xd2, 0x47, 0x79, 0x56, 0x47, 0xe0, 0x71, + 0xcf, 0xea, 0x08, 0x1c, 0xdd, 0x04, 0x08, 0xce, 0xfe, 0x07, 0x1d, 0x35, 0x34, 0xef, 0x0e, 0x29, + 0xe5, 0xbc, 0x3b, 0x84, 0x96, 0xef, 0xa7, 0x20, 0x2b, 0xb6, 0xc0, 0xb1, 0x94, 0xc6, 0x4b, 0x90, + 0xe9, 0x60, 0x97, 0x3e, 0xaf, 0x4b, 0x84, 0x19, 0x2e, 0x07, 0xc9, 0x19, 0x2e, 0x07, 0xa9, 0x09, + 0x78, 0xf2, 0x50, 0x09, 0x78, 0x6a, 0xe4, 0x04, 0x1c, 0xd3, 0xe7, 0x2e, 0x52, 0x20, 0x17, 0xb7, + 0xce, 0x0f, 0x3e, 0x1d, 0xc4, 0x63, 0x18, 0x99, 0x31, 0xf2, 0x18, 0x46, 0x46, 0xa1, 0x5d, 0x38, + 0x29, 0xdd, 0x8c, 0xf3, 0xfe, 0x3d, 0x09, 0xa9, 0x93, 0xc3, 0xdf, 0x16, 0xd5, 0x28, 0x15, 0x0b, + 0x1c, 0xbb, 0x11, 0xa8, 0x5c, 0xc1, 0x44, 0x71, 0xc4, 0xc1, 0x0c, 0xdc, 0xf0, 0x5b, 0xeb, 0x7c, + 0xd9, 0x33, 0xa1, 0x83, 0xc9, 0x70, 0xd9, 0xc1, 0x64, 0x38, 0x6d, 0x05, 0xaa, 0xf3, 0x3d, 0x16, + 0xc7, 0x78, 0x11, 0x72, 0x78, 0xdf, 0xf4, 0xea, 0x4d, 0xdb, 0xc0, 0xbc, 0x8d, 0x40, 0xed, 0x4c, + 0x80, 0x57, 0x6c, 0x43, 0xb1, 0xb3, 0x80, 0xc9, 0xde, 0x94, 0x1c, 0xc9, 0x9b, 0xc2, 0xeb, 0x92, + 0xd4, 0x08, 0xd7, 0x25, 0xb1, 0x76, 0xca, 0x1d, 0x8f, 0x9d, 0xca, 0x5f, 0x27, 0xa0, 0x10, 0x3d, + 0x27, 0xbe, 0x1f, 0x5b, 0x50, 0xdd, 0x4d, 0xc9, 0x91, 0x77, 0xd3, 0x9b, 0x30, 0x41, 0x12, 0x49, + 0xdd, 0xf3, 0xf8, 0xef, 0x00, 0x52, 0x34, 0x1b, 0x64, 0xb1, 0xcd, 0xb7, 0x56, 0x04, 0x5c, 0x89, + 0x6d, 0x12, 0x7c, 0xc0, 0x75, 0xc7, 0x1e, 0xd1, 0x75, 0x3f, 0x4d, 0xc0, 0x84, 0x72, 0x1e, 0xfe, + 0xf8, 0x42, 0x5a, 0x79, 0x0a, 0x26, 0x94, 0x34, 0xb3, 0xfc, 
0x19, 0xf3, 0x33, 0xf5, 0xf4, 0xfb, + 0xf1, 0xad, 0xcb, 0x24, 0x8c, 0xcb, 0xf9, 0x6a, 0xb9, 0x0a, 0x53, 0x91, 0xf4, 0x52, 0x9e, 0x80, + 0x36, 0xca, 0x04, 0xca, 0x73, 0x30, 0x13, 0x97, 0x15, 0x95, 0xaf, 0xc1, 0x4c, 0x5c, 0xbe, 0xf2, + 0xe8, 0x0a, 0xde, 0xe0, 0xef, 0x43, 0x58, 0x66, 0xf1, 0xe8, 0xfc, 0xff, 0x96, 0x12, 0x4d, 0x91, + 0xf0, 0x57, 0x34, 0xef, 0x41, 0xa1, 0x2b, 0x3e, 0xea, 0x0f, 0xad, 0xdd, 0x69, 0x21, 0x14, 0xd0, + 0xaf, 0x45, 0x4a, 0xd7, 0x49, 0x15, 0xa3, 0xca, 0xe6, 0xd5, 0x74, 0x62, 0x44, 0xd9, 0xb5, 0x48, + 0x59, 0x3d, 0xa9, 0x62, 0xd0, 0x5f, 0xc3, 0x49, 0xf1, 0x52, 0x75, 0x0f, 0x8b, 0x81, 0x27, 0x87, + 0x0a, 0x67, 0x3f, 0xfa, 0x09, 0x18, 0xa2, 0x23, 0x9f, 0x8a, 0xa0, 0x22, 0xe2, 0xf9, 0xd8, 0x53, + 0xa3, 0x8a, 0x8f, 0x0e, 0x7e, 0x2a, 0x82, 0x42, 0x1b, 0x30, 0x13, 0x5d, 0x75, 0xa9, 0xdc, 0x2e, + 0xf5, 0x7b, 0xa5, 0x33, 0xea, 0x5a, 0xaa, 0x75, 0xf7, 0xc9, 0x01, 0xa4, 0x2a, 0x51, 0x2a, 0xe0, + 0xd3, 0x31, 0x12, 0x6b, 0x83, 0x95, 0xfc, 0xc9, 0x01, 0x64, 0xf9, 0x73, 0x0d, 0xa6, 0x22, 0xbf, + 0x9d, 0x42, 0x57, 0x21, 0x4b, 0x7f, 0xdc, 0xfc, 0x60, 0x2f, 0xa1, 0x7e, 0x48, 0xe9, 0x94, 0x55, + 0xc8, 0x70, 0x10, 0x7a, 0x09, 0x72, 0xc1, 0x4f, 0xac, 0xf8, 0xf3, 0x17, 0xb6, 0x67, 0x05, 0x50, + 0xd9, 0xb3, 0x02, 0x58, 0xfe, 0xbb, 0x04, 0x9c, 0x1e, 0xfa, 0xbb, 0xaa, 0x27, 0xde, 0xb0, 0x39, + 0x54, 0xdf, 0x29, 0xd2, 0x6d, 0x49, 0x8d, 0xd8, 0x6d, 0xf9, 0x5c, 0x83, 0xb9, 0xf8, 0xdf, 0x5d, + 0x1d, 0xb9, 0x03, 0xa7, 0x4e, 0x23, 0x31, 0xfa, 0x2b, 0xa3, 0x71, 0xf9, 0x27, 0x52, 0x47, 0x1e, + 0xc6, 0x25, 0x18, 0xeb, 0xda, 0x76, 0xdb, 0xe5, 0x8f, 0x00, 0x29, 0x29, 0x05, 0xc8, 0xa4, 0x14, + 0x70, 0xc8, 0x86, 0xdf, 0x1f, 0x35, 0x11, 0x0b, 0xc3, 0x5f, 0x82, 0xfd, 0x88, 0x5c, 0xe8, 0x99, + 0xe7, 0x21, 0x2b, 0x9e, 0x38, 0x21, 0x80, 0xf4, 0x3b, 0x5b, 0xab, 0x5b, 0xab, 0x57, 0x0b, 0x27, + 0x50, 0x1e, 0x32, 0x1b, 0xab, 0xb7, 0xae, 0xde, 0xb8, 0x75, 0xad, 0xa0, 0x91, 0x8f, 0xda, 0xd6, + 0xad, 0x5b, 0xe4, 0x23, 0xf1, 0xcc, 0x4d, 0xf9, 0x25, 0x39, 0xaf, 0x2e, 0xc6, 0x21, 0xbb, 0xd2, + 0xed, 0xd2, 0x63, 0x8e, 0xf1, 0xae, 0xee, 0x99, 0xe4, 0x6c, 0x2a, 0x68, 0x28, 0x03, 0xc9, 0xdb, + 0xb7, 0xd7, 0x0b, 0x09, 0x34, 0x03, 0x85, 0xab, 0x58, 0x37, 0xda, 0xa6, 0x85, 0xc5, 0xd9, 0x5a, + 0x48, 0x56, 0xef, 0x7d, 0xf5, 0xcd, 0x82, 0xf6, 0xf5, 0x37, 0x0b, 0xda, 0xaf, 0xbe, 0x59, 0xd0, + 0xee, 0x7f, 0xbb, 0x70, 0xe2, 0xeb, 0x6f, 0x17, 0x4e, 0xfc, 0xe2, 0xdb, 0x85, 0x13, 0xef, 0x3d, + 0x2f, 0xfd, 0xdd, 0x09, 0xb6, 0x7e, 0x5d, 0xc7, 0x26, 0x47, 0x1e, 0xff, 0x5a, 0x8a, 0xfe, 0xa5, + 0x8d, 0x7f, 0x4d, 0x9c, 0x5b, 0xa1, 0x9f, 0x1b, 0x8c, 0xae, 0x72, 0xc3, 0xae, 0x30, 0x00, 0xfd, + 0x63, 0x09, 0x6e, 0x23, 0x4d, 0xff, 0x28, 0xc2, 0x8b, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x9f, + 0x35, 0xa0, 0x61, 0xa4, 0x43, 0x00, 0x00, +} + +func (m *EventSequence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSequence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EventSequence) MarshalToSizedBuffer(dAtA []byte) (int, error) { @@ -4221,27 +4608,6 @@ func (m *EventSequence_Event_JobRunErrors) MarshalToSizedBuffer(dAtA []byte) (in } return len(dAtA) - i, nil } -func (m *EventSequence_Event_JobDuplicateDetected) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventSequence_Event_JobDuplicateDetected) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - if m.JobDuplicateDetected != nil { - { - size, err := m.JobDuplicateDetected.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - return len(dAtA) - i, nil -} func (m *EventSequence_Event_StandaloneIngressInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -4403,6 +4769,52 @@ func (m *EventSequence_Event_JobPreemptionRequested) MarshalToSizedBuffer(dAtA [ } return len(dAtA) - i, nil } +func (m *EventSequence_Event_JobRunCancelled) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSequence_Event_JobRunCancelled) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JobRunCancelled != nil { + { + size, err := m.JobRunCancelled.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + return len(dAtA) - i, nil +} +func (m *EventSequence_Event_JobValidated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSequence_Event_JobValidated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JobValidated != nil { + { + size, err := m.JobValidated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + return len(dAtA) - i, nil +} func (m *ResourceUtilisation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4423,6 +4835,20 @@ func (m *ResourceUtilisation) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x3a + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x32 + } if len(m.TotalCumulativeUsage) > 0 { for k := range m.TotalCumulativeUsage { v := m.TotalCumulativeUsage[k] @@ -4565,10 +4991,12 @@ func (m *SubmitJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.QueueTtlSeconds != 0 { - i = encodeVarintEvents(dAtA, i, uint64(m.QueueTtlSeconds)) + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) i-- - dAtA[i] = 0x68 + dAtA[i] = 0x72 } if m.IsDuplicate { i-- @@ -5033,6 +5461,13 @@ func (m *ReprioritiseJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if m.Priority != 0 { i = encodeVarintEvents(dAtA, i, uint64(m.Priority)) i-- @@ -5073,6 +5508,13 @@ func (m *JobRequeued) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x22 + } if m.UpdateSequenceNumber != 0 { i = encodeVarintEvents(dAtA, i, uint64(m.UpdateSequenceNumber)) i-- @@ -5153,6 +5595,13 @@ func (m *ReprioritisedJob) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if m.Priority != 0 { i = encodeVarintEvents(dAtA, i, uint64(m.Priority)) i-- @@ -5193,6 +5642,13 @@ func (m *CancelJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if len(m.Reason) > 0 { i -= len(m.Reason) copy(dAtA[i:], m.Reason) @@ -5236,20 +5692,20 @@ func (m *JobSetFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.States) > 0 { - dAtA46 := make([]byte, len(m.States)*10) - var j45 int + dAtA47 := make([]byte, len(m.States)*10) + var j46 int for _, num := range m.States { for num >= 1<<7 { - dAtA46[j45] = uint8(uint64(num)&0x7f | 0x80) + dAtA47[j46] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j45++ + j46++ } - dAtA46[j45] = uint8(num) - j45++ + dAtA47[j46] = uint8(num) + j46++ } - i -= j45 - copy(dAtA[i:], dAtA46[:j45]) - i = encodeVarintEvents(dAtA, i, uint64(j45)) + i -= j46 + copy(dAtA[i:], dAtA47[:j46]) + i = encodeVarintEvents(dAtA, i, uint64(j46)) i-- dAtA[i] = 0xa } @@ -5284,20 +5740,20 @@ func (m *CancelJobSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x12 } if len(m.States) > 0 { - dAtA48 := make([]byte, len(m.States)*10) - var j47 int + dAtA49 := make([]byte, len(m.States)*10) + var j48 int for _, num := range m.States { for num >= 1<<7 { - dAtA48[j47] = uint8(uint64(num)&0x7f | 0x80) + dAtA49[j48] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j47++ + j48++ } - dAtA48[j47] = uint8(num) - j47++ + dAtA49[j48] = uint8(num) + j48++ } - i -= j47 - copy(dAtA[i:], dAtA48[:j47]) - i = encodeVarintEvents(dAtA, i, uint64(j47)) + i -= j48 + copy(dAtA[i:], dAtA49[:j48]) + i = encodeVarintEvents(dAtA, i, uint64(j48)) i-- dAtA[i] = 0xa } @@ -5324,6 +5780,13 @@ func (m *CancelledJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if len(m.Reason) > 0 { i -= len(m.Reason) copy(dAtA[i:], m.Reason) @@ -5366,6 +5829,13 @@ func (m *JobSucceeded) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if len(m.ResourceInfos) > 0 { for iNdEx := len(m.ResourceInfos) - 1; iNdEx >= 0; iNdEx-- { { @@ -5415,6 +5885,20 @@ func (m *JobRunLeased) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x5a + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x52 + } if m.PodRequirementsOverlay != nil { { size, err := m.PodRequirementsOverlay.MarshalToSizedBuffer(dAtA[:i]) @@ -5508,6 +5992,20 @@ func (m *JobRunAssigned) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + 
i-- + dAtA[i] = 0x2a + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x22 + } if len(m.ResourceInfos) > 0 { for iNdEx := len(m.ResourceInfos) - 1; iNdEx >= 0; iNdEx-- { { @@ -5569,6 +6067,20 @@ func (m *JobRunRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x2a + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x22 + } if len(m.ResourceInfos) > 0 { for iNdEx := len(m.ResourceInfos) - 1; iNdEx >= 0; iNdEx-- { { @@ -5798,6 +6310,20 @@ func (m *StandaloneIngressInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x52 + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x4a + } if len(m.PodNamespace) > 0 { i -= len(m.PodNamespace) copy(dAtA[i:], m.PodNamespace) @@ -5900,6 +6426,20 @@ func (m *JobRunSucceeded) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x2a + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x22 + } if len(m.ResourceInfos) > 0 { for iNdEx := len(m.ResourceInfos) - 1; iNdEx >= 0; iNdEx-- { { @@ -5961,6 +6501,13 @@ func (m *JobErrors) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if len(m.Errors) > 0 { for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- { { @@ -6010,6 +6557,20 @@ func (m *JobRunErrors) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x2a + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x22 + } if len(m.Errors) > 0 { for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- { { @@ -6324,12 +6885,33 @@ func (m *Error_GangJobUnschedulable) MarshalToSizedBuffer(dAtA []byte) (int, err } return len(dAtA) - i, nil } -func (m *KubernetesError) Marshal() (dAtA []byte, err error) { +func (m *Error_JobRejected) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Error_JobRejected) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JobRejected != nil { + { + size, err := m.JobRejected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + 
return len(dAtA) - i, nil +} +func (m *KubernetesError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } return dAtA[:n], nil } @@ -6386,6 +6968,13 @@ func (m *PodError) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.DebugMessage) > 0 { + i -= len(m.DebugMessage) + copy(dAtA[i:], m.DebugMessage) + i = encodeVarintEvents(dAtA, i, uint64(len(m.DebugMessage))) + i-- + dAtA[i] = 0x3a + } if m.KubernetesReason != 0 { i = encodeVarintEvents(dAtA, i, uint64(m.KubernetesReason)) i-- @@ -6518,6 +7107,13 @@ func (m *PodLeaseReturned) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.DebugMessage) > 0 { + i -= len(m.DebugMessage) + copy(dAtA[i:], m.DebugMessage) + i = encodeVarintEvents(dAtA, i, uint64(len(m.DebugMessage))) + i-- + dAtA[i] = 0x2a + } if m.RunAttempted { i-- if m.RunAttempted { @@ -6792,7 +7388,7 @@ func (m *GangJobUnschedulable) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobDuplicateDetected) Marshal() (dAtA []byte, err error) { +func (m *JobRejected) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6802,37 +7398,20 @@ func (m *JobDuplicateDetected) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *JobDuplicateDetected) MarshalTo(dAtA []byte) (int, error) { +func (m *JobRejected) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *JobDuplicateDetected) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *JobRejected) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.OldJobId != nil { - { - size, err := m.OldJobId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.NewJobId != nil { - { - size, err := m.NewJobId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0xa } @@ -6859,6 +7438,20 @@ func (m *JobRunPreempted) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.PreemptedRunIdStr) > 0 { + i -= len(m.PreemptedRunIdStr) + copy(dAtA[i:], m.PreemptedRunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.PreemptedRunIdStr))) + i-- + dAtA[i] = 0x32 + } + if len(m.PreemptedJobIdStr) > 0 { + i -= len(m.PreemptedJobIdStr) + copy(dAtA[i:], m.PreemptedJobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.PreemptedJobIdStr))) + i-- + dAtA[i] = 0x2a + } if m.PreemptiveRunId != nil { { size, err := m.PreemptiveRunId.MarshalToSizedBuffer(dAtA[:i]) @@ -6970,6 +7563,20 @@ func (m *JobRunPreemptionRequested) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x22 + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if m.JobId != nil { { size, err := m.JobId.MarshalToSizedBuffer(dAtA[:i]) @@ -7017,6 +7624,113 @@ 
func (m *JobPreemptionRequested) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x12 + } + if m.JobId != nil { + { + size, err := m.JobId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JobValidated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobValidated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobValidated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } + if len(m.Pools) > 0 { + for iNdEx := len(m.Pools) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Pools[iNdEx]) + copy(dAtA[i:], m.Pools[iNdEx]) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Pools[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.JobId != nil { + { + size, err := m.JobId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JobRunCancelled) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobRunCancelled) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobRunCancelled) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RunIdStr) > 0 { + i -= len(m.RunIdStr) + copy(dAtA[i:], m.RunIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.RunIdStr))) + i-- + dAtA[i] = 0x22 + } + if len(m.JobIdStr) > 0 { + i -= len(m.JobIdStr) + copy(dAtA[i:], m.JobIdStr) + i = encodeVarintEvents(dAtA, i, uint64(len(m.JobIdStr))) + i-- + dAtA[i] = 0x1a + } if m.JobId != nil { { size, err := m.JobId.MarshalToSizedBuffer(dAtA[:i]) @@ -7027,6 +7741,18 @@ func (m *JobPreemptionRequested) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintEvents(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + if m.RunId != nil { + { + size, err := m.RunId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -7260,18 +7986,6 @@ func (m *EventSequence_Event_JobRunErrors) Size() (n int) { } return n } -func (m *EventSequence_Event_JobDuplicateDetected) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.JobDuplicateDetected != nil { - l = m.JobDuplicateDetected.Size() - n += 1 + l + sovEvents(uint64(l)) - } - return n -} func (m *EventSequence_Event_StandaloneIngressInfo) Size() (n int) { if m == nil { return 0 @@ -7356,6 +8070,30 @@ func (m *EventSequence_Event_JobPreemptionRequested) Size() (n int) { } return n } +func (m *EventSequence_Event_JobRunCancelled) Size() (n int) { + if m == nil { + return 0 + } + var 
l int + _ = l + if m.JobRunCancelled != nil { + l = m.JobRunCancelled.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *EventSequence_Event_JobValidated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JobValidated != nil { + l = m.JobValidated.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} func (m *ResourceUtilisation) Size() (n int) { if m == nil { return 0 @@ -7392,6 +8130,14 @@ func (m *ResourceUtilisation) Size() (n int) { n += mapEntrySize + 1 + sovEvents(uint64(mapEntrySize)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7460,8 +8206,9 @@ func (m *SubmitJob) Size() (n int) { if m.IsDuplicate { n += 2 } - if m.QueueTtlSeconds != 0 { - n += 1 + sovEvents(uint64(m.QueueTtlSeconds)) + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) } return n } @@ -7631,6 +8378,10 @@ func (m *ReprioritiseJob) Size() (n int) { if m.Priority != 0 { n += 1 + sovEvents(uint64(m.Priority)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7651,6 +8402,10 @@ func (m *JobRequeued) Size() (n int) { if m.UpdateSequenceNumber != 0 { n += 1 + sovEvents(uint64(m.UpdateSequenceNumber)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7679,6 +8434,10 @@ func (m *ReprioritisedJob) Size() (n int) { if m.Priority != 0 { n += 1 + sovEvents(uint64(m.Priority)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7696,6 +8455,10 @@ func (m *CancelJob) Size() (n int) { if l > 0 { n += 1 + l + sovEvents(uint64(l)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7749,6 +8512,10 @@ func (m *CancelledJob) Size() (n int) { if l > 0 { n += 1 + l + sovEvents(uint64(l)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7768,6 +8535,10 @@ func (m *JobSucceeded) Size() (n int) { n += 1 + l + sovEvents(uint64(l)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7806,6 +8577,14 @@ func (m *JobRunLeased) Size() (n int) { l = m.PodRequirementsOverlay.Size() n += 1 + l + sovEvents(uint64(l)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7829,6 +8608,14 @@ func (m *JobRunAssigned) Size() (n int) { n += 1 + l + sovEvents(uint64(l)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7852,6 +8639,14 @@ func (m *JobRunRunning) Size() (n int) { n += 1 + l + sovEvents(uint64(l)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7973,6 +8768,14 @@ func (m *StandaloneIngressInfo) Size() (n int) { if l > 0 { n += 1 + l + sovEvents(uint64(l)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -7996,6 +8799,14 @@ func (m *JobRunSucceeded) Size() (n int) { n += 1 + l + sovEvents(uint64(l)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + 
sovEvents(uint64(l)) + } return n } @@ -8015,6 +8826,10 @@ func (m *JobErrors) Size() (n int) { n += 1 + l + sovEvents(uint64(l)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -8038,6 +8853,14 @@ func (m *JobRunErrors) Size() (n int) { n += 1 + l + sovEvents(uint64(l)) } } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -8188,6 +9011,18 @@ func (m *Error_GangJobUnschedulable) Size() (n int) { } return n } +func (m *Error_JobRejected) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JobRejected != nil { + l = m.JobRejected.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} func (m *KubernetesError) Size() (n int) { if m == nil { return 0 @@ -8235,8 +9070,12 @@ func (m *PodError) Size() (n int) { if m.KubernetesReason != 0 { n += 1 + sovEvents(uint64(m.KubernetesReason)) } - return n -} + l = len(m.DebugMessage) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} func (m *ContainerError) Size() (n int) { if m == nil { @@ -8285,6 +9124,10 @@ func (m *PodLeaseReturned) Size() (n int) { if m.RunAttempted { n += 2 } + l = len(m.DebugMessage) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -8389,18 +9232,14 @@ func (m *GangJobUnschedulable) Size() (n int) { return n } -func (m *JobDuplicateDetected) Size() (n int) { +func (m *JobRejected) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.NewJobId != nil { - l = m.NewJobId.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.OldJobId != nil { - l = m.OldJobId.Size() + l = len(m.Message) + if l > 0 { n += 1 + l + sovEvents(uint64(l)) } return n @@ -8428,6 +9267,14 @@ func (m *JobRunPreempted) Size() (n int) { l = m.PreemptiveRunId.Size() n += 1 + l + sovEvents(uint64(l)) } + l = len(m.PreemptedJobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.PreemptedRunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -8461,6 +9308,14 @@ func (m *JobRunPreemptionRequested) Size() (n int) { l = m.JobId.Size() n += 1 + l + sovEvents(uint64(l)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -8474,6 +9329,58 @@ func (m *JobPreemptionRequested) Size() (n int) { l = m.JobId.Size() n += 1 + l + sovEvents(uint64(l)) } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *JobValidated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JobId != nil { + l = m.JobId.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if len(m.Pools) > 0 { + for _, s := range m.Pools { + l = len(s) + n += 1 + l + sovEvents(uint64(l)) + } + } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *JobRunCancelled) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RunId != nil { + l = m.RunId.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.JobId != nil { + l = m.JobId.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.JobIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RunIdStr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } return n } @@ -9214,41 +10121,6 @@ func (m *EventSequence_Event) Unmarshal(dAtA []byte) error { } m.Event = &EventSequence_Event_JobRunErrors{v} iNdEx = postIndex - case 15: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobDuplicateDetected", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &JobDuplicateDetected{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Event = &EventSequence_Event_JobDuplicateDetected{v} - iNdEx = postIndex case 16: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StandaloneIngressInfo", wireType) @@ -9530,6 +10402,76 @@ func (m *EventSequence_Event) Unmarshal(dAtA []byte) error { } m.Event = &EventSequence_Event_JobPreemptionRequested{v} iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobRunCancelled", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &JobRunCancelled{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &EventSequence_Event_JobRunCancelled{v} + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobValidated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &JobValidated{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &EventSequence_Event_JobValidated{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -9946,6 +10888,70 @@ func (m *ResourceUtilisation) Unmarshal(dAtA []byte) error { } m.TotalCumulativeUsage[mapkey] = *mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -10390,11 +11396,11 @@ func (m *SubmitJob) Unmarshal(dAtA []byte) error { } } m.IsDuplicate = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) } - m.QueueTtlSeconds = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvents @@ -10404,11 +11410,24 @@ func (m *SubmitJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.QueueTtlSeconds |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -11411,22 +12430,54 @@ func (m *ReprioritiseJob) Unmarshal(dAtA []byte) error { break } } - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) } - iNdEx += skippy - } - } - + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { return io.ErrUnexpectedEOF } @@ -11552,6 +12603,38 @@ func (m *JobRequeued) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -11726,6 +12809,38 @@ func (m *ReprioritisedJob) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -11844,6 +12959,38 @@ func (m *CancelJob) Unmarshal(dAtA []byte) error { } m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -12232,6 +13379,38 @@ func (m *CancelledJob) Unmarshal(dAtA []byte) error { } m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -12352,6 +13531,38 @@ func (m *JobSucceeded) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -12632,6 +13843,70 @@ func (m *JobRunLeased) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -12788,9 +14063,73 @@ func (m *JobRunAssigned) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) if err != nil { return err } @@ -12944,6 +14283,70 @@ func (m *JobRunRunning) Unmarshal(dAtA []byte) error { return err } iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -13782,6 +15185,70 @@ func (m *StandaloneIngressInfo) Unmarshal(dAtA []byte) error { } m.PodNamespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -13938,6 +15405,70 @@ func (m *JobRunSucceeded) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -14058,17 +15589,49 @@ func (m *JobErrors) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } iNdEx += skippy } @@ -14214,6 +15777,70 @@ func (m *JobRunErrors) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -14669,6 +16296,41 @@ func (m *Error) Unmarshal(dAtA []byte) error { } m.Reason = &Error_GangJobUnschedulable{v} iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobRejected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &JobRejected{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Reason = &Error_JobRejected{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -15009,6 +16671,38 @@ func (m *PodError) Unmarshal(dAtA []byte) error { break } } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DebugMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -15354,6 +17048,38 @@ func (m *PodLeaseReturned) Unmarshal(dAtA []byte) error { } } m.RunAttempted = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DebugMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -16027,7 +17753,7 @@ func (m *GangJobUnschedulable) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobDuplicateDetected) Unmarshal(dAtA []byte) error { +func (m *JobRejected) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16050,53 +17776,17 @@ func (m *JobDuplicateDetected) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobDuplicateDetected: wiretype end group for non-group") + return fmt.Errorf("proto: JobRejected: wiretype end group for 
non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobDuplicateDetected: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobRejected: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewJobId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NewJobId == nil { - m.NewJobId = &Uuid{} - } - if err := m.NewJobId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldJobId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowEvents @@ -16106,27 +17796,23 @@ func (m *JobDuplicateDetected) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthEvents } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthEvents } if postIndex > l { return io.ErrUnexpectedEOF } - if m.OldJobId == nil { - m.OldJobId = &Uuid{} - } - if err := m.OldJobId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -16322,24 +18008,88 @@ func (m *JobRunPreempted) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptedJobIdStr", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreemptedJobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptedRunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreemptedRunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } return nil } @@ -16549,6 +18299,70 @@ func (m *JobRunPreemptionRequested) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) @@ -16635,6 +18449,374 @@ func (m *JobPreemptionRequested) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobValidated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobValidated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobValidated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JobId == nil { + m.JobId = &Uuid{} + } + if err := m.JobId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pools", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pools = append(m.Pools, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobRunCancelled) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobRunCancelled: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobRunCancelled: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunId == nil { + m.RunId = &Uuid{} + } + if err := m.RunId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JobId == nil { + m.JobId = &Uuid{} + } + if err := m.JobId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunIdStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunIdStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) diff --git a/pkg/armadaevents/events.proto b/pkg/armadaevents/events.proto index f70d5f2a21f..a3741729757 100644 --- a/pkg/armadaevents/events.proto +++ b/pkg/armadaevents/events.proto @@ -83,7 +83,6 @@ message EventSequence { JobRunRunning jobRunRunning= 12; JobRunSucceeded jobRunSucceeded = 13; JobRunErrors jobRunErrors = 14; - JobDuplicateDetected jobDuplicateDetected = 15; StandaloneIngressInfo standaloneIngressInfo = 16; ResourceUtilisation resourceUtilisation = 17; JobRunPreempted jobRunPreempted = 19; @@ -91,6 +90,8 @@ message EventSequence { JobRunPreemptionRequested jobRunPreemptionRequested = 21; JobRequeued jobRequeued = 22; JobPreemptionRequested jobPreemptionRequested = 23; + 
JobRunCancelled jobRunCancelled = 24; + JobValidated jobValidated = 25; } } // The system is namespaced by queue, and all events are associated with a job set. @@ -114,6 +115,9 @@ message ResourceUtilisation { KubernetesResourceInfo resource_info = 3; map max_resources_for_period = 4 [(gogoproto.nullable) = false]; map total_cumulative_usage = 5 [(gogoproto.nullable) = false]; + // This is a string representation of the job_id. Eventually we will retire the job_id field and just use strings + string job_id_str = 6; + string run_id_str = 7; } // A UUID, encoded in accordance with section 4.1.2 of RFC 4122 @@ -165,8 +169,9 @@ message SubmitJob { string scheduler = 11; // Indicates whether job is a duplicate bool isDuplicate = 12; - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. - int64 queue_ttl_seconds = 13; + // Ordinal 13 was originally used for queue_ttl_seconds + // This is a string representation of the job_id. Eventually we will retire the job_id field and just use strings + string job_id_str = 14; } // Kubernetes objects that can serve as main objects for an Armada job. @@ -218,6 +223,7 @@ message PodSpecWithAvoidList { message ReprioritiseJob { Uuid job_id = 1; uint32 priority = 2; + string job_id_str = 3; } message JobRequeued { @@ -225,6 +231,7 @@ message JobRequeued { schedulerobjects.JobSchedulingInfo scheduling_info = 2; // Used by the scheduler to maintain a consistent state int32 update_sequence_number = 3; + string job_id_str = 4; } // Set the priority of all jobs part of a job set. @@ -238,6 +245,7 @@ message ReprioritiseJobSet { message ReprioritisedJob { Uuid job_id = 1; uint32 priority = 2; + string job_id_str = 3; } // A request to cancel a particular job. @@ -246,6 +254,7 @@ message ReprioritisedJob { message CancelJob { Uuid job_id = 1; string reason = 2; + string job_id_str = 3; } @@ -273,6 +282,7 @@ message CancelJobSet { message CancelledJob { Uuid job_id = 1; string reason = 2; + string job_id_str = 3; } message JobSucceeded { @@ -281,6 +291,7 @@ message JobSucceeded { // for each resource created for the job run. // TODO: remove this once we have fixed the external api repeated KubernetesResourceInfo resourceInfos = 2; + string job_id_str = 3; } // Indicates that a job has been leased to a cluster by the Armada scheduler. @@ -309,6 +320,8 @@ message JobRunLeased { // for example, it may add additional tolerations to runs that are scheduled // as away jobs. schedulerobjects.PodRequirements pod_requirements_overlay = 9; + string job_id_str = 10; + string run_id_str = 11; } // Indicates that a job has been assigned to nodes by Kubernetes. @@ -319,6 +332,8 @@ message JobRunAssigned { // for each resource created for the job run. // Included here and in JobRunRunning for compatibility with legacy messages. repeated KubernetesResourceInfo resourceInfos = 3; + string job_id_str = 4; + string run_id_str = 5; } // Indicates that the resources required by the job have been created and that the job is now running. @@ -328,6 +343,8 @@ message JobRunRunning { // Runtime information, e.g., which node the job is running on, its IP address etc, // for each resource created for the job run. repeated KubernetesResourceInfo resourceInfos = 3; + string job_id_str = 4; + string run_id_str = 5; } // Message containing runtime information about some resource created for a job. 
@@ -363,10 +380,12 @@ message StandaloneIngressInfo { ObjectMeta objectMeta = 3; map ingress_addresses = 4; // The legacy message bundles info associated with the pod. - string node_name = 5; - int32 pod_number = 6; - string pod_name = 7; - string pod_namespace = 8; + string node_name = 5; + int32 pod_number = 6; + string pod_name = 7; + string pod_namespace = 8; + string job_id_str = 9; + string run_id_str = 10; } // Indicates that the job finished successfully (i.e., in the expected manner). @@ -377,6 +396,8 @@ message JobRunSucceeded { // for each resource created for the job run. // TODO: remove this once we have fixed the external api repeated KubernetesResourceInfo resourceInfos = 3; + string job_id_str = 4; + string run_id_str = 5; } // Message containing a set of errors associated with a particular job. @@ -386,6 +407,7 @@ message JobErrors { Uuid job_id = 1; // A set of errors that occurred within some context. repeated Error errors = 2; + string job_id_str = 3; } // Message containing a set of errors associated with a particular job run. @@ -400,6 +422,8 @@ message JobRunErrors { Uuid job_id = 2; // A set of errors that occurred within some context. repeated Error errors = 3; + string job_id_str = 4; + string run_id_str = 5; } // Represents a failure that took place in the course of a job run (i.e., an attempt to run a job). @@ -426,6 +450,7 @@ message Error { PodTerminated podTerminated = 10; JobRunPreemptedError jobRunPreemptedError = 11; GangJobUnschedulable gangJobUnschedulable = 12; + JobRejected jobRejected = 13; } } @@ -454,6 +479,7 @@ message PodError { int32 pod_number = 4; repeated ContainerError containerErrors = 5; KubernetesReason kubernetes_reason = 6; + string debugMessage = 7; } message ContainerError { @@ -475,6 +501,7 @@ message PodLeaseReturned { string message = 2; int32 pod_number = 3; bool run_attempted =4; + string debugMessage = 5; } // Indicates that the lease on the job that the pod was part of could not be renewed. @@ -512,11 +539,8 @@ message GangJobUnschedulable{ string message = 1; } -// Generated by the scheduler whenever it detects a SubmitJob message that includes a previously used deduplication id -// (i.e., when it detects a duplicate job submission). -message JobDuplicateDetected { - Uuid new_job_id = 1; - Uuid old_job_id = 2; +message JobRejected { + string message = 1; } // Message to indicate that a JobRun has been preempted. @@ -529,6 +553,8 @@ message JobRunPreempted{ Uuid preemptive_job_id = 3; // Uuid of the job run that caused the preemption. Uuid preemptive_run_id = 4; + string preempted_job_id_str = 5; + string preempted_run_id_str = 6; } // Message used internally by Armada to see if messages can be propagated through a pulsar partition @@ -543,9 +569,28 @@ message PartitionMarker { message JobRunPreemptionRequested { Uuid run_id = 1; Uuid job_id = 2; + string job_id_str = 3; + string run_id_str= 4; } // Indicates that a user has requested for the job to be pre-empted. message JobPreemptionRequested { + Uuid job_id = 1; + string job_id_str = 2; +} + +// Indicates that the scheduler is happy with the job +message JobValidated { Uuid job_id = 1; + repeated string pools = 2; + string job_id_str = 3; +} + +// Generated by the scheduler when a job is cancelled, all active job runs are also cancelled +// One such message is generated per job run that was cancelled. 
+message JobRunCancelled { + Uuid run_id = 1; + Uuid job_id = 2; + string job_id_str = 3; + string run_id_str= 4; } diff --git a/pkg/armadaevents/events_util.go b/pkg/armadaevents/events_util.go index 625035b3567..0a142522e48 100644 --- a/pkg/armadaevents/events_util.go +++ b/pkg/armadaevents/events_util.go @@ -141,12 +141,6 @@ func (ev *EventSequence_Event) UnmarshalJSON(data []byte) error { return err } ev.Event = &jobRunErrors - case "jobDuplicateDetected": - var jobDuplicateDetected EventSequence_Event_JobDuplicateDetected - if err = json.Unmarshal(rawEvent.EventBytes, &jobDuplicateDetected); err != nil { - return err - } - ev.Event = &jobDuplicateDetected case "standaloneIngressInfo": var standaloneIngressInfo EventSequence_Event_StandaloneIngressInfo if err = json.Unmarshal(rawEvent.EventBytes, &standaloneIngressInfo); err != nil { diff --git a/pkg/armadaevents/jobid.go b/pkg/armadaevents/jobid.go index 4bd2d33bdee..05cc110b218 100644 --- a/pkg/armadaevents/jobid.go +++ b/pkg/armadaevents/jobid.go @@ -32,14 +32,16 @@ func JobIdFromEvent(event *EventSequence_Event) (*Uuid, error) { return e.JobErrors.JobId, nil case *EventSequence_Event_JobRunErrors: return e.JobRunErrors.JobId, nil - case *EventSequence_Event_JobDuplicateDetected: - return e.JobDuplicateDetected.NewJobId, nil case *EventSequence_Event_StandaloneIngressInfo: return e.StandaloneIngressInfo.JobId, nil case *EventSequence_Event_JobRunPreempted: return e.JobRunPreempted.PreemptedJobId, nil + case *EventSequence_Event_JobRunCancelled: + return e.JobRunCancelled.JobId, nil case *EventSequence_Event_JobRequeued: return e.JobRequeued.JobId, nil + case *EventSequence_Event_JobValidated: + return e.JobValidated.JobId, nil default: err := errors.WithStack(&armadaerrors.ErrInvalidArgument{ Name: "event.Event", diff --git a/pkg/armadaevents/uuid.go b/pkg/armadaevents/uuid.go index d7194dddea4..43c4c9a3ce6 100644 --- a/pkg/armadaevents/uuid.go +++ b/pkg/armadaevents/uuid.go @@ -105,3 +105,7 @@ func ProtoUuidFromUuidString(uuidString string) (*Uuid, error) { func MustUlidStringFromProtoUuid(id *Uuid) string { return strings.ToLower(UlidFromProtoUuid(id).String()) } + +func MustUuidStringFromProtoUuid(id *Uuid) string { + return strings.ToLower(UuidFromProtoUuid(id).String()) +} diff --git a/pkg/client/command_line.go b/pkg/client/command_line.go index 432e763ee0e..6e522eb934c 100644 --- a/pkg/client/command_line.go +++ b/pkg/client/command_line.go @@ -20,6 +20,8 @@ var mergedConfigFiles []string // path to config file, as given by viper flags var cfgFile string +var defaultArmadaConnectionUrl string = "localhost:50051" + // AddArmadaApiConnectionCommandlineArgs adds command-line flags to a cobra command. // Arguments given via these flags are later used by LoadCommandlineArgsFromConfigFile. 
// Hence, apps that use the client package to load config should call this function as part of @@ -31,8 +33,7 @@ func AddArmadaApiConnectionCommandlineArgs(rootCmd *cobra.Command) { panic(err) } - // This is to be removed eventually - rootCmd.PersistentFlags().String("armadaUrl", "", "specify armada server url") + rootCmd.PersistentFlags().String("armadaUrl", defaultArmadaConnectionUrl, "specify armada server url") err = viper.BindPFlag("armadaUrl", rootCmd.PersistentFlags().Lookup("armadaUrl")) if err != nil { panic(err) @@ -190,15 +191,14 @@ func ExtractCommandlineArmadaApiConnectionDetails() (*ApiConnectionDetails, erro var err error if context := viper.GetString("currentContext"); context != "" { - if viper.GetString("armadaUrl") != "" { - fmt.Printf("Provided armadaUrl %s ignored in favour of context derived connection details\n", viper.GetString("armadaUrl")) - } - subTree := viper.Sub(fmt.Sprintf("contexts.%s", context)) if subTree == nil { return nil, fmt.Errorf("context %s not found under contexts within the Armada config", context) } err = subTree.Unmarshal(apiConnectionDetails) + if viper.GetString("armadaUrl") != defaultArmadaConnectionUrl { + apiConnectionDetails.ArmadaUrl = viper.GetString("armadaUrl") + } } else { fmt.Print("No context defined. This method of providing connection details will soon be deprecated, " + diff --git a/pkg/client/domain/watch.go b/pkg/client/domain/watch.go index be461cb0db3..4efd4425aa1 100644 --- a/pkg/client/domain/watch.go +++ b/pkg/client/domain/watch.go @@ -185,8 +185,6 @@ func updateJobInfo(info *JobInfo, event api.Event) { info.PodStatus = append(info.PodStatus, Submitted) info.PodLastUpdated = append(info.PodLastUpdated, time.Time{}) } - case *api.JobDuplicateFoundEvent: - info.Status = Duplicate case *api.JobQueuedEvent: info.Status = Queued case *api.JobLeasedEvent: @@ -223,8 +221,6 @@ func updateJobInfo(info *JobInfo, event api.Event) { // NOOP case *api.JobUtilisationEvent: info.MaxUsedResources.Max(typed.MaxResourcesForPeriod) - case *api.JobUpdatedEvent: - info.Job = &typed.Job } } @@ -287,8 +283,6 @@ func isLifeCycleEvent(event api.Event) bool { return true case *api.JobQueuedEvent: return true - case *api.JobDuplicateFoundEvent: - return true case *api.JobLeasedEvent: return true case *api.JobLeaseReturnedEvent: diff --git a/pkg/client/watch.go b/pkg/client/watch.go index 03f0abc294f..2b9a96ae498 100644 --- a/pkg/client/watch.go +++ b/pkg/client/watch.go @@ -67,8 +67,6 @@ func WatchJobSetWithJobIdsFilter( FromMessageId: lastMessageId, Watch: waitForNew, ErrorIfMissing: errorOnNotExists, - ForceNew: forceNew, - ForceLegacy: forceLegacy, }, ) diff --git a/scripts/infrastructure-start.sh b/scripts/infrastructure-start.sh new file mode 100755 index 00000000000..6a11bbd5ba6 --- /dev/null +++ b/scripts/infrastructure-start.sh @@ -0,0 +1,2 @@ +#!/usr/bin/sh +mage kind startDependencies diff --git a/scripts/infrastructure-stop.sh b/scripts/infrastructure-stop.sh new file mode 100755 index 00000000000..4a1aab747c8 --- /dev/null +++ b/scripts/infrastructure-stop.sh @@ -0,0 +1,2 @@ +#!/usr/bin/sh +mage stopDependencies kindTeardown diff --git a/testsuite/performance/jobservice/fakearmada/event_server.go b/testsuite/performance/jobservice/fakearmada/event_server.go index c8261d96f7c..33968087015 100644 --- a/testsuite/performance/jobservice/fakearmada/event_server.go +++ b/testsuite/performance/jobservice/fakearmada/event_server.go @@ -33,8 +33,6 @@ func (s *PerformanceTestEventServer) Watch(req *api.WatchRequest, stream api.Eve FromMessageId: 
req.FromId, Queue: req.Queue, ErrorIfMissing: true, - ForceLegacy: req.ForceLegacy, - ForceNew: req.ForceNew, } return s.GetJobSetEvents(request, stream) } diff --git a/testsuite/testcases/basic/cancel_by_id_1x1.yaml b/testsuite/testcases/basic/cancel_by_id_1x1.yaml index baf90d49dd4..10bf56ec6b7 100644 --- a/testsuite/testcases/basic/cancel_by_id_1x1.yaml +++ b/testsuite/testcases/basic/cancel_by_id_1x1.yaml @@ -10,20 +10,20 @@ jobs: containers: - name: cancel-by-id imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "sleep" - "100s" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- cancel: BY_ID timeout: "100s" expectedEvents: - submitted: - - cancelled: \ No newline at end of file + - cancelled: diff --git a/testsuite/testcases/basic/cancel_by_ids_1x10.yaml b/testsuite/testcases/basic/cancel_by_ids_1x10.yaml index e549f81b86c..623b07c8fb1 100644 --- a/testsuite/testcases/basic/cancel_by_ids_1x10.yaml +++ b/testsuite/testcases/basic/cancel_by_ids_1x10.yaml @@ -10,16 +10,16 @@ jobs: containers: - name: cancel-by-id imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "sleep" - "100s" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- cancel: BY_IDS diff --git a/testsuite/testcases/basic/cancel_by_set_1x1.yaml b/testsuite/testcases/basic/cancel_by_set_1x1.yaml index b6a801c7d05..83bf26fd429 100644 --- a/testsuite/testcases/basic/cancel_by_set_1x1.yaml +++ b/testsuite/testcases/basic/cancel_by_set_1x1.yaml @@ -10,20 +10,20 @@ jobs: containers: - name: cancel-by-set imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "sleep" - "100s" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- cancel: BY_SET timeout: "100s" expectedEvents: - submitted: - - cancelled: \ No newline at end of file + - cancelled: diff --git a/testsuite/testcases/basic/failure_1x1.yaml b/testsuite/testcases/basic/failure_1x1.yaml index 6d4f8194d71..748568a1d0f 100644 --- a/testsuite/testcases/basic/failure_1x1.yaml +++ b/testsuite/testcases/basic/failure_1x1.yaml @@ -10,19 +10,19 @@ jobs: containers: - name: sleep imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "exit" - "1" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "100s" expectedEvents: - submitted: - - failed: \ No newline at end of file + - failed: diff --git a/testsuite/testcases/basic/failure_errimagepull.yaml b/testsuite/testcases/basic/failure_errimagepull.yaml index 0d68f948e89..0759d13797a 100644 --- a/testsuite/testcases/basic/failure_errimagepull.yaml +++ b/testsuite/testcases/basic/failure_errimagepull.yaml @@ -15,14 +15,14 @@ jobs: - "ls" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "300s" expectedEvents: - submitted: - unableToSchedule: - - failed: \ No newline at end of file + - failed: diff --git a/testsuite/testcases/basic/failure_namespace.yaml b/testsuite/testcases/basic/failure_namespace.yaml index 08d4908971a..338176cc1c1 100644 --- a/testsuite/testcases/basic/failure_namespace.yaml +++ b/testsuite/testcases/basic/failure_namespace.yaml @@ -10,18 +10,18 @@ jobs: containers: - name: failure-namespace imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "ls" 
resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "100s" expectedEvents: - submitted: - - failed: \ No newline at end of file + - failed: diff --git a/testsuite/testcases/basic/failure_oom_1x1.yaml b/testsuite/testcases/basic/failure_oom_1x1.yaml index 56dadb02512..819e602dd83 100644 --- a/testsuite/testcases/basic/failure_oom_1x1.yaml +++ b/testsuite/testcases/basic/failure_oom_1x1.yaml @@ -10,16 +10,16 @@ jobs: containers: - name: oom imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "tail" - "/dev/zero" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "300s" diff --git a/testsuite/testcases/basic/ingress.yaml b/testsuite/testcases/basic/ingress.yaml index c6c24a86499..a620cb7effb 100644 --- a/testsuite/testcases/basic/ingress.yaml +++ b/testsuite/testcases/basic/ingress.yaml @@ -15,7 +15,7 @@ jobs: containers: - name: nc imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 # Server responding to the first request with a 200 status code and then exiting. args: ["nc", "-l", "-p", "4000", "-e", "echo", "HTTP/1.1 200 OK\nContent-Length: 12\nConnection: close\nContent-Type: text/html\n\nHello world!"] resources: @@ -34,4 +34,4 @@ timeout: "100s" expectedEvents: - submitted: - ingressInfo: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/basic/priorityclass_default.yaml b/testsuite/testcases/basic/priorityclass_default.yaml index 23ca07af1e2..86ee4b5198d 100644 --- a/testsuite/testcases/basic/priorityclass_default.yaml +++ b/testsuite/testcases/basic/priorityclass_default.yaml @@ -11,19 +11,19 @@ jobs: containers: - name: sleep imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "sleep" - "1" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "130s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/basic/submit_1x1.yaml b/testsuite/testcases/basic/submit_1x1.yaml index a1073d37507..3d8061bbfa3 100644 --- a/testsuite/testcases/basic/submit_1x1.yaml +++ b/testsuite/testcases/basic/submit_1x1.yaml @@ -10,18 +10,18 @@ jobs: containers: - name: sleep imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "ls" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "100s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/basic/submit_2x3.yaml b/testsuite/testcases/basic/submit_2x3.yaml index 654fd0d5c49..1f5728de177 100644 --- a/testsuite/testcases/basic/submit_2x3.yaml +++ b/testsuite/testcases/basic/submit_2x3.yaml @@ -10,18 +10,18 @@ jobs: containers: - name: sleep imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "ls" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "100s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/basic/submit_fileshare_1x1.yaml b/testsuite/testcases/basic/submit_fileshare_1x1.yaml index 7c04bf4c8a9..5cca3e0ddc5 100644 --- a/testsuite/testcases/basic/submit_fileshare_1x1.yaml +++ b/testsuite/testcases/basic/submit_fileshare_1x1.yaml @@ 
-13,7 +13,7 @@ jobs: initContainers: - name: writer imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 command: - sh - -c @@ -24,10 +24,10 @@ jobs: mountPath: /usr/share/test resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m containers: - name: reader @@ -50,13 +50,13 @@ jobs: mountPath: /usr/share/test resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "100s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/basic/submit_random_clientid.yaml b/testsuite/testcases/basic/submit_random_clientid.yaml index e23f35e39a7..e071a66ec9a 100644 --- a/testsuite/testcases/basic/submit_random_clientid.yaml +++ b/testsuite/testcases/basic/submit_random_clientid.yaml @@ -1,5 +1,5 @@ numBatches: 1 -batchSize: 10 +batchSize: 3 queue: e2e-test-queue randomClientId: true jobs: @@ -11,18 +11,18 @@ jobs: containers: - name: ls imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:3.20.0 args: - "ls" resources: limits: - memory: 10Mi + memory: 20Mi cpu: 100m requests: - memory: 10Mi + memory: 20Mi cpu: 100m --- timeout: "180s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/performance/submit_10x100.yaml b/testsuite/testcases/performance/submit_10x100.yaml index 4856aee7ecf..dda7ca7baed 100644 --- a/testsuite/testcases/performance/submit_10x100.yaml +++ b/testsuite/testcases/performance/submit_10x100.yaml @@ -16,13 +16,13 @@ jobs: - "Hello world!" resources: limits: - memory: 10Mi + memory: 25Mi cpu: 15m requests: - memory: 10Mi + memory: 25Mi cpu: 15m --- timeout: "1800s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/performance/submit_1x1K.yaml b/testsuite/testcases/performance/submit_1x1K.yaml index 75a2cd155ef..ec8d5343512 100644 --- a/testsuite/testcases/performance/submit_1x1K.yaml +++ b/testsuite/testcases/performance/submit_1x1K.yaml @@ -16,13 +16,13 @@ jobs: - "Hello world!" 
resources: limits: - memory: 10Mi + memory: 25Mi cpu: 100m requests: - memory: 10Mi + memory: 25Mi cpu: 100m --- timeout: "900s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/performance/submit_load_1x500.yaml b/testsuite/testcases/performance/submit_load_1x500.yaml index 4ba03a4b63c..7d1be585249 100644 --- a/testsuite/testcases/performance/submit_load_1x500.yaml +++ b/testsuite/testcases/performance/submit_load_1x500.yaml @@ -21,13 +21,13 @@ jobs: - -v resources: limits: - memory: 10Mi + memory: 25Mi cpu: 50m requests: - memory: 10Mi + memory: 25Mi cpu: 50m --- timeout: "600s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/testsuite/testcases/performance/submit_load_5x100.yaml b/testsuite/testcases/performance/submit_load_5x100.yaml index 80db256b46e..b3cecfc3077 100644 --- a/testsuite/testcases/performance/submit_load_5x100.yaml +++ b/testsuite/testcases/performance/submit_load_5x100.yaml @@ -21,13 +21,13 @@ jobs: - -v resources: limits: - memory: 10Mi + memory: 25Mi cpu: 50m requests: - memory: 10Mi + memory: 25Mi cpu: 50m --- timeout: "600s" expectedEvents: - submitted: - - succeeded: \ No newline at end of file + - succeeded: diff --git a/third_party/airflow/pyproject.toml b/third_party/airflow/pyproject.toml index b13c2eac309..8a0ffec1c0c 100644 --- a/third_party/airflow/pyproject.toml +++ b/third_party/airflow/pyproject.toml @@ -12,7 +12,8 @@ dependencies = [ "apache-airflow>=2.6.3", "grpcio==1.58.0", "grpcio-tools==1.58.0", - "types-protobuf==4.24.0.1" + "types-protobuf==4.24.0.1", + "protobuf>=3.20,<5.0" ] authors = [{name = "Armada-GROSS", email = "armada@armadaproject.io"}] license = { text = "Apache Software License" } diff --git a/tools.yaml b/tools.yaml index 6a04fd8b55b..9335b7e03b3 100644 --- a/tools.yaml +++ b/tools.yaml @@ -12,4 +12,4 @@ tools: - github.com/mitchellh/gox@v1.0.1 - github.com/wlbr/templify@v0.0.0-20210816202250-7b8044ca19e9 - golang.org/x/tools/cmd/goimports@v0.5.0 -- sigs.k8s.io/kind@v0.14.0 +- sigs.k8s.io/kind@v0.23.0
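
For context, the recurring `job_id_str` / `run_id_str` additions in the events.proto hunks above all follow the same migration pattern: each event keeps its legacy `Uuid` field and gains a lowercase string form, produced with the new `MustUuidStringFromProtoUuid` helper added in pkg/armadaevents/uuid.go. Below is a minimal sketch of how a producer might fill in both forms for the new `JobRunCancelled` event; the constructor name, package name, and import path are assumptions for illustration and are not part of this change set.

```go
// Sketch only: populate both the legacy Uuid fields and the new *_str fields
// on JobRunCancelled while consumers migrate to the string representation.
package example

import "github.com/armadaproject/armada/pkg/armadaevents" // assumed module path

// newJobRunCancelled is a hypothetical constructor for illustration only.
func newJobRunCancelled(jobId, runId *armadaevents.Uuid) *armadaevents.JobRunCancelled {
	return &armadaevents.JobRunCancelled{
		// Legacy UUID fields, still set while the migration is in flight.
		JobId: jobId,
		RunId: runId,
		// New string forms; MustUuidStringFromProtoUuid returns the
		// lowercase UUID string, matching the helper added in uuid.go.
		JobIdStr: armadaevents.MustUuidStringFromProtoUuid(jobId),
		RunIdStr: armadaevents.MustUuidStringFromProtoUuid(runId),
	}
}
```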